diff --git a/.gitattributes b/.gitattributes index 0a8b6f606e16b852b1b2c5db3edbecc8e1eae0cf..600886d2cb22aced49b1ee2a8e7d2c57c59af4c2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3987,3 +3987,74 @@ q9AzT4oBgHgl3EQfA_op/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -tex NNFRT4oBgHgl3EQfGTcp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text UtAzT4oBgHgl3EQfX_xg/content/2301.01327v1.pdf filter=lfs diff=lfs merge=lfs -text 69AzT4oBgHgl3EQfEvpR/content/2301.00998v1.pdf filter=lfs diff=lfs merge=lfs -text +3tAyT4oBgHgl3EQfb_f6/content/2301.00276v1.pdf filter=lfs diff=lfs merge=lfs -text +GNE0T4oBgHgl3EQfhQF8/content/2301.02429v1.pdf filter=lfs diff=lfs merge=lfs -text +DdAzT4oBgHgl3EQfwf7y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +CtE0T4oBgHgl3EQfQQAs/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +UtAzT4oBgHgl3EQfX_xg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +mNE_T4oBgHgl3EQf6xyS/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +NtE3T4oBgHgl3EQfwwva/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +rdE3T4oBgHgl3EQfMgkz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +vNAyT4oBgHgl3EQfafc7/content/2301.00242v1.pdf filter=lfs diff=lfs merge=lfs -text +qdE2T4oBgHgl3EQf0gjz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +89E0T4oBgHgl3EQfwgGc/content/2301.02634v1.pdf filter=lfs diff=lfs merge=lfs -text +RdE3T4oBgHgl3EQfygvM/content/2301.04721v1.pdf filter=lfs diff=lfs merge=lfs -text +DdAzT4oBgHgl3EQfwf7y/content/2301.01725v1.pdf filter=lfs diff=lfs merge=lfs -text +jdE1T4oBgHgl3EQfNQNY/content/2301.02999v1.pdf filter=lfs diff=lfs merge=lfs -text +CtE0T4oBgHgl3EQfQQAs/content/2301.02189v1.pdf filter=lfs diff=lfs merge=lfs -text +3NFST4oBgHgl3EQfYjhQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +6NE4T4oBgHgl3EQf1g37/content/2301.05292v1.pdf filter=lfs diff=lfs merge=lfs -text +WNE2T4oBgHgl3EQfDgYF/content/2301.03624v1.pdf filter=lfs diff=lfs merge=lfs -text +69AzT4oBgHgl3EQfEvpR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +hNE3T4oBgHgl3EQf4AvA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +vNAyT4oBgHgl3EQfafc7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +3NAyT4oBgHgl3EQfo_gV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +wdE3T4oBgHgl3EQflQpC/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +cNE0T4oBgHgl3EQf4wL5/content/2301.02744v1.pdf filter=lfs diff=lfs merge=lfs -text +3tAyT4oBgHgl3EQfb_f6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +5NFIT4oBgHgl3EQf7itm/content/2301.11398v1.pdf filter=lfs diff=lfs merge=lfs -text +wdE3T4oBgHgl3EQflQpC/content/2301.04604v1.pdf filter=lfs diff=lfs merge=lfs -text +ydE2T4oBgHgl3EQf3wjf/content/2301.04175v1.pdf filter=lfs diff=lfs merge=lfs -text +stE_T4oBgHgl3EQf8xx6/content/2301.08377v1.pdf filter=lfs diff=lfs merge=lfs -text +zdAyT4oBgHgl3EQfbPea/content/2301.00259v1.pdf filter=lfs diff=lfs merge=lfs -text +GNAyT4oBgHgl3EQfSvfA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +6NE4T4oBgHgl3EQf1g37/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ptAyT4oBgHgl3EQfzflz/content/2301.00702v1.pdf filter=lfs diff=lfs merge=lfs -text +89E0T4oBgHgl3EQfwgGc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ntE2T4oBgHgl3EQfJwYe/content/2301.03694v1.pdf filter=lfs diff=lfs merge=lfs -text +JdAyT4oBgHgl3EQfsPlo/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text 
+a9E4T4oBgHgl3EQfoQ0b/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +bNAyT4oBgHgl3EQfivjL/content/2301.00403v1.pdf filter=lfs diff=lfs merge=lfs -text +jdA0T4oBgHgl3EQfIv9Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +aNE3T4oBgHgl3EQfdAo0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +zdAyT4oBgHgl3EQfbPea/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ZtAyT4oBgHgl3EQfvvnv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +cNE0T4oBgHgl3EQf4wL5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +RdA0T4oBgHgl3EQfDv9U/content/2301.02007v1.pdf filter=lfs diff=lfs merge=lfs -text +mtFPT4oBgHgl3EQf4zVh/content/2301.13194v1.pdf filter=lfs diff=lfs merge=lfs -text +a9FLT4oBgHgl3EQfXi8l/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ZtAyT4oBgHgl3EQfvvnv/content/2301.00638v1.pdf filter=lfs diff=lfs merge=lfs -text +HNA0T4oBgHgl3EQfBv_D/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +b9FPT4oBgHgl3EQfBTTg/content/2301.12985v1.pdf filter=lfs diff=lfs merge=lfs -text +T9E4T4oBgHgl3EQfmg0h/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ntE2T4oBgHgl3EQfJwYe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +HNA0T4oBgHgl3EQfBv_D/content/2301.01981v1.pdf filter=lfs diff=lfs merge=lfs -text +VtAyT4oBgHgl3EQfV_ek/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +9tFJT4oBgHgl3EQfoyxM/content/2301.11597v1.pdf filter=lfs diff=lfs merge=lfs -text +ldAyT4oBgHgl3EQfk_iC/content/2301.00444v1.pdf filter=lfs diff=lfs merge=lfs -text +jdA0T4oBgHgl3EQfIv9Y/content/2301.02079v1.pdf filter=lfs diff=lfs merge=lfs -text +T9E4T4oBgHgl3EQfmg0h/content/2301.05168v1.pdf filter=lfs diff=lfs merge=lfs -text +vdE4T4oBgHgl3EQfXAw5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +5NFIT4oBgHgl3EQf7itm/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +c9FIT4oBgHgl3EQfnivD/content/2301.11315v1.pdf filter=lfs diff=lfs merge=lfs -text +XNAyT4oBgHgl3EQfh_h5/content/2301.00387v1.pdf filter=lfs diff=lfs merge=lfs -text +3NAyT4oBgHgl3EQfo_gV/content/2301.00515v1.pdf filter=lfs diff=lfs merge=lfs -text +c9FIT4oBgHgl3EQfnivD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +bdFST4oBgHgl3EQfCjh5/content/2301.13707v1.pdf filter=lfs diff=lfs merge=lfs -text +xdE0T4oBgHgl3EQf-gIr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +stFKT4oBgHgl3EQf1y5_/content/2301.11921v1.pdf filter=lfs diff=lfs merge=lfs -text +XNAyT4oBgHgl3EQfh_h5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +stFKT4oBgHgl3EQf1y5_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +NtE3T4oBgHgl3EQfwwva/content/2301.04706v1.pdf filter=lfs diff=lfs merge=lfs -text +a9E4T4oBgHgl3EQfoQ0b/content/2301.05182v1.pdf filter=lfs diff=lfs merge=lfs -text +9tFJT4oBgHgl3EQfoyxM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text diff --git a/0dFQT4oBgHgl3EQf0Daw/content/tmp_files/2301.13415v1.pdf.txt b/0dFQT4oBgHgl3EQf0Daw/content/tmp_files/2301.13415v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..1e26f5a6568fdc580f2a1a4e673bdeea3c1c0475 --- /dev/null +++ b/0dFQT4oBgHgl3EQf0Daw/content/tmp_files/2301.13415v1.pdf.txt @@ -0,0 +1,1215 @@ +LOGAI: A LIBRARY FOR LOG ANALYTICS AND INTELLIGENCE +Qian Cheng, Amrita Saha, Wenzhuo Yang, Chenghao Liu, Doyen Sahoo, Steven Hoi +Salesforce AI Research +{qcheng, amrita.saha, wenzhuo.yang, chenghao.liu, dsahoo, shoi}@salesforce.com +ABSTRACT +Software and System logs record runtime information about 
processes executing within a system. +These logs have become the most critical and ubiquitous forms of observability data that help +developers understand system behavior, monitor system health and resolve issues. However, the +volume of logs generated can be humongous (of the order of petabytes per day) especially for complex +distributed systems, such as cloud, search engine, social media, etc. This has propelled a lot of +research on developing AI-based log based analytics and intelligence solutions that can process +huge volume of raw logs and generate insights. In order to enable users to perform multiple types +of AI-based log analysis tasks in a uniform manner, we introduce LogAI (https://github.com/ +salesforce/logai), a one-stop open source library for log analytics and intelligence. LogAI +supports tasks such as log summarization, log clustering and log anomaly detection. It adopts the +OpenTelemetry data model, to enable compatibility with different log management platforms. LogAI +provides a unified model interface and provides popular time-series, statistical learning and deep +learning models. Alongside this, LogAI also provides an out-of-the-box GUI for users to conduct +interactive analysis. With LogAI, we can also easily benchmark popular deep learning algorithms for +log anomaly detection without putting in redundant effort to process the logs. We have opensourced +LogAI to cater to a wide range of applications benefiting both academic research and industrial +prototyping. +Keywords Log Analysis · Machine Learning · Anomaly Detection · Clustering · Artifical Intelligence · AIOps +1 +Introduction +System and Software logs are text messages that are embedded by software and application developers in the source +code and are designed to carry useful runtime information about the process, which are typically dumped as raw log +files, once the system starts executing. In modern computer systems, especially for large distributed systems that run +complex software, such as search engines, social network websites, and cloud platforms, logs are one of the most +critical observability data. Logs are widely used in a variety of operational tasks, covering use cases such as system +availability, reliability and security. In scenarios when users have no direct access to the physical servers, logs are often +the ground truth about the systems and applications. As such, Log Management has become a very important task in +the industrial landscape. In fact, log management market size grew to $2.29 billion in 2023, at a compound annual +growth rate (CAGR) of 15.9%, according to the report from The Business [1]. +Ideally, logs should be capturing the runtime information at a very granular level and stored permanently so that +when any disruptive incident occurs, developers and operators can always look up the correct log file and inspect the +log messages to debug what caused the incident. In reality though, because of the colossal size of the log dumps, +storing them permanently in the raw form is often impractical. This challenge can be mitigated with the help of large +cloud-based logging systems such as AWS Cloudwatch and Microsoft Azure Logs where it is possible to even store +the entire log data and retain them for a substantial period of time. Moreover, these logging systems also provide +capabilities to help efficient log querying and visualization, enabling developers and operators to quickly access the log +dumps or log streams of their software. 
With these capabilities, the main open question is, how to explore raw logs and +find the right set of logs associated with an incident? followed by a more advanced one - Is there a way to automatically +analyze the logs and tell if there are issues with a system, create incidents and provide additional insights? +arXiv:2301.13415v1 [cs.AI] 31 Jan 2023 + +Cheng et. al +Depending on which operational stage logs are involved in, the goal of log analysis in that specific situation could be +different. Logs can be used for incident detection, where reliability engineers and developers need to continuously +monitor the log streams in order to detect any unexpected behavior that might be indicative of an incident. For post +incident detection, log data can play a critical role in root-cause analysis, where operators examine the raw logs to +identify the loglines that show anomalous patterns and thus localize the anomaly and eventually the root cause of the +incident to a single service, component or module or a group of them. The situation becomes even more complex in +large distributed systems, where people (typically reliability engineers) who inspect the logs to resolve incidents may +not necessarily be the same group of people (i.e. software and application developers) who write the logging statements +in software code. In these situations, understanding even simple dump logs can take significant amount of time and +effort, owing to the open-ended nature of the log data. +Over the past decade there have been various effort targeted at developing both commercial and open-source software +to cater to automated log analysis. Though, most of the initial work used either domain specific rules or heuristics, +with the proliferation of AI and ML, more and more data-driven techniques have been adopted and popularized in this +community. However, most of the AI-driven effort has been applied in an isolated manner, focusing on specific log +analysis tasks (like how to extract structure out of the raw logs or how to detect anomaly patterns in it). There is still an +urgent need for bringing together all the AI, ML and NLP techniques to a unified platform that can cater to the entire +suite of different log analysis tasks. Nevertheless, creating such a one-stop library to serve a diverse set of log-based +analytics can be quite non-trivial, with some of the potential challenges being, as follows: +• Lack of unified log data model for log analysis. Different logs are in different formats and as a result +analysis tools need to be customized for different log formats and schemas. It is not easy to generalize +analytical algorithms without a unified data model that can handle heterogenous forms of log data. +• Redundant effort in data preprocessing and information extraction. The current status of log analytics +in this community is that there is a lack of a consolidated pipeline for data preprocessing and information +extraction across all log analysis models and tasks - i.e. different log analysis algorithms have been implemented +independently, with each adopting their own pipelines and workflows. For different tasks, or even different +algorithms of the same task, developers need to implement multiple redundant preprocessing and information +extraction process modules. +• Difficulty in managing log analysis experiments and benchmarking. Empirical benchmarking forms a +critical part of research and applied science. 
In the existing literature, there is no unified workflow management +mechanism to run log analysis benchmarking experiments. For example, while there has been some isolated +pockets of deep learning research for log anomaly detection, it is quite challenging for other organizations or +users to adopt them or reproduce their experimental results, due to the lack of a common unified framework +for log analysis. +In this inter-disciplinary community of AIOps, users may have different needs while working on log analysis in +academic and industrial settings when they are in different roles. For example, 1) Machine learning researchers may +need a hassle-free way to perform benchmarking experiments on public log datasets and reproduce the experimental +results from peer research groups in order to develop new log analysis algorithms; 2) Industrial data scientists and +AIOps practitioners may need an intuitive workflow to quickly experiment with existing log analysis algorithms on +their own log data and select the best performing algorithm, hyperparameters and experimental configurations as their +log analysis solution, and 3) Data and software engineers need to integrate the selected algorithm into production and +deploy them in a smooth and efficient way. Unfortunately, we realize there is no existing open source toolkit that can +satisfy all the above needs. +We are thus motivated to develop a holistic LogAI solution - a python library aimed for conducting AI-based log +analytics and intelligence tasks to serve a variety of academic and industrial use-cases. LogAI (https://github. +com/salesforce/logai) provides a unified way to conduct various of log analysis tasks such as log summarization, +clustering, anomaly detection. LogAI also provides a unified data model, inheriting from OpenTelemetry log data +model, to handle logs in different formats. LogAI is also the first open source log analytics library that incorporate +time-series algorithms, statistical learning algorithms and deep learning algorithms. Moreover, LogAI implemented an +out-of-the-box GUI portal to conduct log analysis in interactive way, more naturally align with the user experience of +real-world log analysis. +Besides, in this technical report we also demonstrate how to use LogAI to easily benchmark deep learning algorithms +for log anomaly detection without any redundant effort in log preprocessing and cleaning. In this community, there are +existing libraries like LogLizer and Deep-Loglizer [2, 3] which have consolidated some of the AI/ML effort for the log +domain. However, they still suffer from a few limitations - for example lacking a unified data processing pipeline that is +generic across all tasks or algorithms or catering to only anomaly detection as the log analysis task or covering only a +2 + +Cheng et. al +specific types of algorithms. In Section 5, we elaborate on the limitations of these existing libraries and also show how +LogAI provides a more intuitive framework for designing and managing the experimental settings while performing +comparable to Deep-Loglizer. +2 +Related Work +Recently, researchers and engineers have been working on a variety of problems about automated log analysis in +academia and industry [4]. Based on the existing solutions, we can summarize a common workflow to conduct +automated log analysis. The common workflow contains four steps: log collection, log cleaning and preprocessing, log +information extraction and log analysis and intelligence applications, Figure 1. 
Log collection is the data loading step +that collects logs from local log dump files or log management platforms. Log cleaning and preprocessing is the step +to use predefined rules and domain knowledge to clean noisy log data, remove or replace known log templates. This +step usually does not involve any ML process. Log information extraction is the step where ML models are involved +to extract information from log data, and feed the log representation or features to train ML models for analytics and +intelligence application tasks. Log information extraction usually contains several steps like log parsing, partitioning, +feature extraction, etc. The final step, log analytics and intelligence, is to train ML models for a specific log downstream +task. For example, log clustering and summarization are common log analytics tasks, while log based anomaly detection +and root-cause analysis are common log intelligence tasks. +Figure 1: Common Log Analytics and Intelligence Workflow. The common workflow contains four steps: 1) log +collection from local log files or log platforms, 2) log cleaning and preprocessing, 3) log information extraction and 4) +log analytics tasks (such as clustering and summarization) and log intelligence tasks (such as anomaly detection and +root-cause analysis). +Log analysis has a very long history and there are a lot of tools for log analysis. Almost all commercial log management +software/SaaS have associated log analysis/ log insights offerings. This includes log management products such as +Splunk, DataDog, NewRelic, etc., as well as cloud providers such as Amazon AWS, Microsoft Azure and Google +Cloud. In open source community, there are also very popular log management and analysis projects such as GreyLogs, +Grafana, Prometheus, etc. However, neither these commercial log management platform nor open-source log analysis +tools are incorporated with comprehensive AI techniques such as deep learning, large language models (LLM), BERT, +etc. +Meanwhile, there are a few open-source AI-based log analysis tools that started to support more comprehensive AI +techniques. For example, LOGPAI (https://github.com/logpai/) is one of the most famous log anaysis community on +GitHub. LOGPAI provides logparser for automated log parsing. LOGPAI also provides loglizer [5] and deep-loglizer [6] +for log anomaly detection. Besides LOGPAI, there are other open-source projects, most of which are open source code +from research outcomes, such as LogClass and Log2Vec from NetManAIOps (https://github.com/orgs/NetManAIOps). +3 +Design Principles +In this section we discuss about the design principles of LogAI library. LogAI provides a unified framework for log +analysis. In order to achieve this, LogAI follows the following design principles: 1) high compatibility with data from +different log sources, 2) reusable components to avoid reproducing effort, 3) unified setup process for customized +applications and 4) easy-to-use GUI for out-of-box interactive log analysis. +3 + +Log Information +Log Analytics and +Log Collection +Log Cleaning and +Extraction +Intelligence +Preprocessing +: From local files +: Cleaning Noisy Data +• Log Parsing +: Analytics: +: From log platforms +: Log Partitioning +。 Clustering +: Remove or Replace +Custom Log +. Feature Extraction + Summarization +Templates +: Intelligence +: Anomaly Detection +• Rootcause AnalysisCheng et. 
al +3.1 +Compatible with data from different log sources +One of the attractive qualities of log data is its open-ended form, where developers can design them to capture useful +runtime and performance information to any arbitrary level of granularity as per the needs of the application. Different +software can generate very different logs. Even in the same software, there are different levels of logs, such as service +logs, application logs, systems logs, etc. These logs can be in different formats, either structured, semi-structured or +unstructured. LogAI takes these factors into consideration and ensures that the data loader can consume and process +these heterogeneous types of logs in a seamless way, by converting these logs into log record with unified log data +model. +3.2 +Reusable components to avoid duplicated effort +As briefly motivated in Sec 1, a particular challenge of building log analytics in both academic and industrial settings, is +the lack of an unified framework that allows reusal of data processing and information extraction components across +different log analysis tasks, even on the same data source or dataset. For instance, engineers and researchers have to +build separate pipelines to perform log anomaly detection, log clustering or summarization even to deal with the same +log data source. This burden significantly impacts efficiency in every development stage. from experiments, prototyping +all the way to productization. Also running multiple pipelines in production increases the system complexity and brings +additional operational cost. Thus, building a library that unifies the interface of common components across multiple +downstream tasks is necessary to improve efficiency of all stages of log analysis. +3.3 +Unified setup process for customized applications +Even for the same application, the design choice behind the log analysis pipeline might have different variations, based +on the various needs or limitations of the use-case. For example, log anomaly detection may involve different steps in +the end-to-end (E2E) workflow. Some may include log parsing, while others might choose to skip this step either due +to the computational overhead or simply because the downstream analysis models do not need a well-defined parsed +structure. Also, when converting the raw log text data to machine-readable vectors there can be various choices - either +to convert log messages into time-series counter vectors or into event sequences by representing each log line as a id +or as a sequence of natural language tokens. In production setup, adding, removing or replacing a component in the +E2E workflow could be very time consuming. LogAI is designed to support building customized applications with +easy plug-in / plug-out components, enabling users to quickly try out various combinations through simple intuitive +mechanisms like configurable json or yaml files. +3.4 +Easy-to-use GUI for out-of-box interactive log analysis +Another learning while we work with different types of log data is about visual examination. Unlike many machine +learning domains where the model performance evaluation can heavily rely on metrics, such as Precision, Recall, +F-scores, log analysis tasks usually need more visual examination to validate the performance. Thus, LogAI is developed +with a graphic user interface (GUI), or a portal, to integrate with interactive analytical features for tasks such as log +summarization, clustering and anomaly detection. 
We believe this portal can reduce the cognitive overhead on the +LogAI users in onboarding to the library and help them execute the log analysis tasks quickly and intuitively. +4 +Architecture +LogAI is separated into the GUI module and core library module. The GUI module contains the implementation of a GUI +portal that talks to backend analysis applications. The portal is supported using Plotly Dash (https://plotly.com/dash/). +The core library module contains four main layers: data layer, pre-processing layer, information extraction layer and +analysis layer. Each layer contains the components to process logs in a standard way. LogAI applications, such as log +summarization, log clustering, unsupervised log anomaly detection, are created on top of the components of the four +layers. +4.1 +Core Library Modules +LogAI is implemented in the architecture described in Figure 2. In this section we describe the technical details of each +layer. Including the implementation of components and how the components communicate across layers. +4 + +Cheng et. al +Figure 2: LogAI Architecture +4.1.1 +Data Layer +Data layer contains two component classes: LogRecordObject class and DataLoader class. +LogRecordObject class defines the data model of log records. As we mentioned in Introduction, logs are free-form +text and can be unstructured or semi-structured. Even for structured logs, different software applications may name their +log data in different ways. LogRecordObject is to adapt log data from different sources to a more unified structure in +order to provide a data object that can be used in all follow-up processes without modification. In LogAI, data model +of LogRecordObject is a subset of the log and event record definition by OpenTelemetry (https://opentelemetry.io/), +containing fields in Table 1. +Table 1: LogRecordObject Data Model +Field +Description +Timestamp +Timestamp when event occurred. +Body +loglines or the content of log messages. +Attributes +a map for structured information of log record. +TraceId +Request trace id as defined in W3C Trace Context. Can be set for logs that are part of +request processing and have an assigned trace id. This field is optional. +SpanId +Trace flag as defined in W3C Trace Context specification. At the time of writing the +specification defines one flag - the SAMPLED flag. This field is optional. +SeverityText +String represents the severity. This field is optional. +SeverityNumber +Numeric values of severity, TRACE(1-4), DEBUG(5-8), INFO(9-12), WARN(13-16), +ERROR(17-20), FATAL(21-24). This field is optional. +Resource +Description of. the source of the log. +InstrumentationScope +Multiple occurrences of events coming from the same scope can happen across time and +they all have the same value of InstrumentationScope. +DataLoader is a class that implements functions to load data from sources. In current version we implement +FileDataLoader to load data from local files, e.g. .log,.csv,.tsv,.json. The associated DataLoaderConfig +class defines the configuration of how data will be loaded. load_data() method will load data from target source and +return LogRecordObject. In the future versions we will support data loaders with connectors to consume data directly +from log platforms such as Splunk, Datadog, AWS Cloudwatch, etc. +4.1.2 +Preprocessing Layer +Preprocessing. Preprocessor is a class to conduct logline level preprocessing. 
Users can initialize a preprocessor instance with a configuration and execute the .clean_log() method to obtain cleaned loglines. The supported configuration includes custom_delimiters_regex to parse logs with custom delimiters, and custom_replace_list to identify and replace substrings that match the regex patterns in this list; examples are shown in Figure 3.
Figure 3: Example of preprocessor execution
Partitioning. Partitioner is a class that helps partition the logs. As part of preprocessing, raw logs may need to be shuffled, concatenated and sequentialized into different forms, for example using time-based partitions, identifier-based partitions or sliding-window partitions of fixed length. This class provides optional functions for this type of processing.
4.1.3 Information Extraction Layer
The information extraction layer contains modules that convert log records into vectors which can be used as input to machine learning models for the actual analytical tasks. Current log analysis research and applications indicate that three main input data types are used in ML approaches: 1) converting log records into counter vectors for time-series ML techniques, 2) converting log records into feature vectors for tabular ML techniques, and 3) converting log records into sequences for sequential ML techniques.
LogAI implements four components in the information extraction layer to extract information from the log records and convert logs into the target formats. The log parser component implements a series of automatic parsing algorithms in order to extract templates from the input loglines. The log vectorizer implements a collection of vectorization algorithms to convert free-form log text into numerical representations for each logline. The categorical encoder implements algorithms that encode categorical attributes into numerical representations for each logline. Last but not least, the feature extractor implements methods to group the logline-level representation vectors into log-event-level representations.
Automated Log Parsing. LogParser is a class that conducts automated log parsing tasks. Currently LogAI covers three automated log parsing algorithms: DRAIN [7], IPLoM [8] and AEL [9]. LogParser takes the unstructured logline text as input and generates two sets of results: parsed_logline, the static pattern of all logs in this category, and parameter_list, the lists of values for each "*" position in the log pattern for the same set of loglines.
Log Vectorization. LogVectorizer is a class that converts unstructured loglines into semantic vectors. Each semantic vector is an array of numeric values that represents the logline text. LogVectorizer supports popular text vectorization algorithms such as TF-IDF [10], FastText [11], Word2Vec [12], etc.
Categorical Encoding. CategoricalEncoder is a class that encodes log attributes, the structured portion of logs. String-type attributes are transformed into categorical representations.
CategoricalEncoder supports popular categorical encoding algorithms such as label encoding, one-hot encoding and ordinal encoding.
Feature Extraction. FeatureExtractor is a class that conducts the final transformation of raw log data into a log feature set that machine learning models can consume. In LogAI, we primarily cover three types of log features: 1) time-series counters, 2) semantic feature sets and 3) sequence vectors. Time-series counters are used to feed time-series models such as ETS and ARIMA. Semantic feature sets can be widely used in a variety of machine learning and deep learning models. Sequence vectors are a specific type of feature format required by sequence-modeling based deep learning methods, for example recurrent neural networks or convolutional neural networks.
4.1.4 Analysis Layer
The analysis layer contains modules that conduct the analysis tasks, including but not limited to semantic anomaly detection, time-series anomaly detection, sequence anomaly detection and clustering. Each analysis module provides a unified interface for multiple underlying algorithms.
Anomaly Detection. AnomalyDetector is a class to conduct anomaly detection analysis that finds abnormal logs from a semantic perspective. AnomalyDetector takes log features of the given logs as input and outputs anomaly scores. LogAI supports two different types of anomaly detection: 1) anomaly detection based on log counter vectors and 2) anomaly detection based on log semantic representations. The supported anomaly detection algorithms include univariate and multivariate time-series analysis algorithms from Merlion [13], and unsupervised outlier detection models like one-class SVM [14] and local outlier factor (LOF) [15] from scikit-learn [16].
Deep-learning based anomaly detection. The NNAnomalyDetector class supports deep-learning based log anomaly detection algorithms, most of which take log sequence vectors as input. LogAI integrates some of the popular deep learning algorithms such as the recurrent neural network (RNN) based model LSTM [17], convolutional neural networks (CNN), Transformers [18] and the pretrained Transformer-based language model BERT [19]. The output is an anomaly score for each log sequence.
Clustering. Clustering is a class to conduct log clustering analysis tasks. The input for log clustering is the semantic log features. Clustering integrates different clustering models, such as k-Means [20] and DBSCAN [21]. The output is a map between each log feature record and a cluster label.
4.1.5 E2E Applications
Using the component modules from the data layer, preprocessing layer, feature extraction layer and analysis layer, LogAI provides the flexibility to build end-to-end log analysis applications, and these applications follow the design principles illustrated in Figure 4. LogAI is launched with several out-of-the-box applications; a minimal sketch of how such an application can be assembled is shown below.
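To make the application-assembly idea concrete, the following sketch mirrors the four-layer workflow (load, preprocess, extract features, analyze) using scikit-learn primitives in place of the actual LogAI component classes. The loglines, regex patterns and algorithm choices are illustrative assumptions, not the LogAI API itself.

# Illustrative end-to-end pipeline mirroring the LogAI workflow
# (load -> preprocess -> extract features -> analyze), written with
# scikit-learn primitives instead of the actual LogAI component classes.
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import OneClassSVM

# 1) Data layer: load raw loglines (stand-in for a file-based data loader).
raw_loglines = [
    "081109 204655 INFO dfs.DataNode: Received block blk_123 from /10.251.42.84",
    "081109 204656 INFO dfs.DataNode: Received block blk_456 from /10.251.42.85",
    "081109 204702 WARN dfs.DataNode: Exception writing block blk_789 to disk",
]

# 2) Preprocessing layer: mask dynamic fields (IPs, block ids, numbers),
#    similar in spirit to a custom_replace_list configuration.
def clean(line: str) -> str:
    line = re.sub(r"\d+\.\d+\.\d+\.\d+", "<IP>", line)
    line = re.sub(r"blk_-?\d+", "<BLK>", line)
    return re.sub(r"\b\d+\b", "<NUM>", line)

clean_loglines = [clean(l) for l in raw_loglines]

# 3) Information extraction layer: vectorize the cleaned loglines (TF-IDF here).
features = TfidfVectorizer(token_pattern=r"\S+").fit_transform(clean_loglines)

# 4) Analysis layer: unsupervised outlier detection over the feature vectors.
detector = OneClassSVM(nu=0.1, kernel="rbf").fit(features)
anomaly_scores = detector.decision_function(features)  # lower = more anomalous
print(list(zip(raw_loglines, anomaly_scores)))

Swapping any single step (e.g. a different vectorizer or detector) leaves the rest of the pipeline untouched, which is the plug-in/plug-out behavior the design principles above aim for.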
+Figure 4: Design Principles of E2E Applications +Log Summarization. It is very important to understand your logs before using them for downstream tasks. Log +summarization leverages machine learning to process, aggregate and summarize logs. Please refer to the GUI module +Section 4.2 for more detail about how to use. +Log Clustering. Log clustering can be used to categorize logs. Finding meaningful clusters can bring benefits in a +variety of use cases like anomaly detection, log storage, query, etc. Please refer to the GUI module Section 4.2 for more +detail about how to use. +Log Anomaly Detection. Log anomaly detection is an application that detect anomalous loglines. Here in LogAI log +anomaly detection can detect both time-series anomalies and semantic anomalies. Please refer to the GUI module +Section 4.2 for more detail about how to use. +7 + +Log record +Data Preparation +ApplicationWorkflow +objects +Workflow Configuration +Dataloader +Preprocessing +IE-component-N +Analysis-Component +Configuration +Configuration +Configuration +Configuration +FileDataloader +Algorithm-1 +Algorithm-1 +Preprocessing +OpenSetDataloader +Algorithm-2 +Algorithm-2 +Partitioning +OtherDataloader +Algorithm-3 +Algorithm-3Cheng et. al +Figure 5: LogAI GUI portal +4.2 +GUI Module +The GUI module is implemented to provide a web portal for the out-of-the-box log analysis applications, including +log summarization, log clustering and log anomaly detection. Figure 5 shows the log summarization of LogAI portal. +LogAI portal is developed using Plotly Dash framework. +Control Panel. Control panel is on the left side of the page. In the control panel, users can upload files, configure file +and algorithm settings. When the user click "Run" button, the analysis execution is triggered. This behavior is uniform +for all three different applications. After analysis execution completed, the results will be displayed on the right side of +the page. +Main Display Panel. On the right side of the page we display the analysis results. Different applications may have +different layouts. The portal supports interactive visualization. The users can click or hover on parts in the charts to +drill down and get more detailed information. +The interaction between frontend and backend of different applications are designed to be unified. The control panel +collects user input and generate configuration for application and send to backend. Backend consumes the configuration +to create component instances to execute the workflow. After finishing the job, it will send the result table to frontend. +The display panel for each application controls how the result table will be rendered for visualization. Users can expand +the GUI portal to support customized analysis applications by following the same design pattern and reusing the existing +components. +4.3 +Summary of Supported ML Algorithms in LogAI +This section summarizes the machine learning algorithms supported in LogAI. LogAI provides an algorithms compo- +nent to implement all supported algorithms with algorithm factory. The algorithm contains five algorithmic mod- +ules, notably: parsing_algo, vectorization_algo, categorical_encoding_algo, clustering_algo, anomaly_detection_algo. +algorithms component also contains a nn_model module to implement all neural network models. LogAI has defined +unified algorithm interfaces for each module and we can implement more algorithms and integrated it with LogAI in +future development. 
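As an illustration of what such a unified interface with an algorithm factory can look like, the sketch below defines a minimal anomaly detection interface and a name-based registration factory. It is a simplified illustration of the pattern described above, not the actual LogAI implementation.

# Minimal sketch of a unified algorithm interface plus a registration factory,
# illustrating the pattern described above (not the actual LogAI code).
from abc import ABC, abstractmethod

class AnomalyDetectionAlgo(ABC):
    """Common interface every anomaly detection algorithm implements."""
    @abstractmethod
    def fit(self, features): ...
    @abstractmethod
    def predict(self, features): ...

_REGISTRY: dict[str, type] = {}

def register(name: str):
    """Class decorator that registers an algorithm under a string name."""
    def wrap(cls):
        _REGISTRY[name] = cls
        return cls
    return wrap

def get_algo(name: str, **params) -> AnomalyDetectionAlgo:
    """Factory: look up a registered algorithm by name and instantiate it."""
    return _REGISTRY[name](**params)

@register("one_class_svm")
class OneClassSVMDetector(AnomalyDetectionAlgo):
    def __init__(self, nu: float = 0.1):
        from sklearn.svm import OneClassSVM
        self.model = OneClassSVM(nu=nu)
    def fit(self, features):
        self.model.fit(features)
        return self
    def predict(self, features):
        return self.model.decision_function(features)

# The caller only deals with the algorithm name, e.g. taken from a config file.
detector = get_algo("one_class_svm", nu=0.05)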
The current LogAI algorithm coverage is shown in Table 2.
The deep-learning models, being much more parameter-heavy, generally require high-end compute devices such as GPUs. In such cases, their LogAI implementations provide options to use different devices (CPU or GPU), or multiple GPUs, seamlessly through the algorithm parameter configurations.
Table 2: Summary of supported machine learning algorithms in LogAI
Module / Algorithms / Task
Log parser: DRAIN, IPLoM, AEL (Information Extraction)
Log vectorizer: Fast-text, TF-IDF, Word2Vec, Semantic, Sequential, BertTokenizer (Unstructured Log Representation)
Categorical Encoder: Label encoding, OneHot Encoding, Ordinal Encoding (Structured Log Representation)
Clustering: DBSCAN, K-means, BIRCH (Analysis: Log Clustering)
Anomaly Detection: One-class SVM, Isolation Forest, LOF, Distribution divergence (Analysis: Outlier Detection); ETS, Dynamic Baseline, ARIMA (Analysis: Time-series Anomaly Detection)
NN models: CNN, LSTM, Transformers (Analysis: Sequential Anomaly Detection); LogBERT (Analysis: Sequential / Non-Sequential Anomaly Detection)
5 Experiments: Benchmarking Log Anomaly Detection
In this section, we describe some of our experimental effort at building pipelines for specific log analysis tasks on publicly available log datasets. The purpose is to benchmark the performance of the LogAI library on these standard tasks against the performances reported in existing literature and by other well-known log libraries.
Amongst the different log analysis tasks, log based anomaly detection is perhaps the most objective one, where domain experts like reliability and performance engineers can provide some supervision about which log sequences show anomalous behavior. Other tasks like log clustering and summarization are much more subjective in nature, while log based root-cause analysis is very specific and tightly coupled with the application or environment it is deployed in. Hence, for these tasks it is often impossible to collect supervision labels for benchmarking purposes. Consequently, most of the publicly available log analysis datasets and benchmarks have focused on the anomaly detection task. While a small subset of these datasets have also been repurposed to serve log clustering and log summarization in past literature, they can at best be considered pseudo-oracle data for these tasks and are still not large-scale enough for benchmarking purposes.
For this reason, in our LogAI library we focus on benchmarking only the log based anomaly detection task.
Following the advances of Artificial Intelligence (AI), Machine Learning (ML) and Natural Language Processing (NLP), for the log anomaly detection task too, traditional statistical ML solutions (like SVM, Isolation Forest, etc.) have gradually given way to more powerful and sophisticated neural models. Some of these newer models can leverage self-supervised learning to achieve, in unsupervised settings, anomaly detection performance comparable to older supervised models. Additionally, the traditional ML models, having been around for quite a while, have been more extensively studied, with fairly well-reproduced benchmarks in the existing literature. Hence, in our benchmarking experiments we focus only on the more recent neural models.
5.1 Limitations of Existing Libraries and Benchmarking Practices
Over the past decade there has been a large body of literature [22, 23, 24, 25, 26, 27, 28, 29, 3] reporting log anomaly detection performance on some of the standard open-sourced log datasets, as well as various efforts at open-sourcing libraries catering to the log anomaly detection task. For example, [2, 3] released the Loglizer and Deep-Loglizer libraries for log based anomaly detection using traditional machine learning and more recent deep learning models, respectively. In these libraries they consolidated some of the benchmarking effort, bringing together the popular log anomaly detection models for a fairer comparison on a few public log datasets.
However, despite this, there is still a lack of rigorous standardization and benchmarking amongst these works, especially the ones employing neural models. Below we list some specific limitations of the Loglizer and Deep-Loglizer libraries which underscore the need for a unified, generic framework for log analysis tasks:
• Generic Log Data Processing Pipeline: There is a lack of libraries that provide a generic data processing pipeline that is common across different log datasets or different log anomaly detection algorithms. While Loglizer [5] and Deep-Loglizer [3] have achieved this to some degree, they still require some dataset-specific preprocessing and customization which is quite open-ended. For users wishing to replicate the results on their own datasets or other public datasets, there is no clear framework guiding the necessary steps and output structure of the dataset-specific preprocessing. In contrast, the LogAI library provides a unified, generic data-processing pipeline across all public datasets and log analysis algorithms. It only requires minimal dataset-specific customization, with a clear template of the kind of preprocessing needed for each dataset - e.g. each dataset has its own way of specifying the fields of the LogRecordObject (governed by the OpenTelemetry data model), such as labels or identifiers of the loglines, which are either directly part of the raw log data or have to be derived based on some rules.
• Catering to multiple Log Analysis Tasks: There is a lack of libraries that can cater to all kinds of log analysis tasks (including log clustering, summarization, anomaly detection, etc.) under a single generic platform. Each of the existing log libraries is tailored to a specific kind of log analysis task.
For example libraries like loglizer and Deep-Loglizer +specifically focus on log based anomaly detection, log-parser on parsing log data and log3C cater to clustering and +correlation specific analysis. On the other hand, logAI enables all of these analysis tasks along with others, like, +summarization, visualization etc under an unified framework. +• Coverage of Log Analysis Models: The existing Loglizer library provides the more traditional machine learning +algorithms for log based anomaly detection, with the Deep-Loglizer being a deep-learning based counterpart of it, +providing only neural ML models. LogAI on the other hand, provides a generic framework encompassing most of the +popular AI/ML algorithms - starting from traditional statistical ML models to popular neural models as well as more +recent pretrained Transformer (BERT) based models. Going ahead, our logAI library can provide a more extended +platform for integrating with more upcoming and powerful neural models as the mainstream deep learning research +progresses. For all of these models, logAI provides a single unified data processing platform, that is independent of +the kind of downstream analysis task or models. +Thus, with LogAI library, we aim at a more intuitive and easy-to-use log analysis framework for practitioners of +different areas and levels of expertise to perform log analysis, without being impeded by the technical nuances of the +task. +5.2 +Log based Anomaly Detection Workflow +In order to handle the complex and heterogenous nature of log data, log based anomaly detection typically follows a +multi-step pipeline. Starting with the raw log data dump or data streams, the log analysis workflow does some initial +preprocessing and cleaning-up of the raw logs to make them amenable to ML models. This is typically followed by log +parsing which extracts a loose structure from the semi-structured data and then performs grouping and partitioning of +the log lines into log sequences in order to model the sequence characteristics of the data. After this, the logs or log +sequences are vectorized i.e. represented as a machine-readable vector, by first tokenizing each instance and converting +each token to a d-dimensional embedding. On this vectorized version of the log data, various anomaly detection models +can be applied. +The choices of each of these steps (for e.g. whether to apply parsing or not, or whether to partition based on sessions or +sliding windows, or whether to apply clustering or not) can be guided by various factors - nature of the application +generating the log data or the model requirements or other efficiency or performance related constraints. +i) Log Preprocessing: In LogAI, this step involves handling the formatting of timestamps, logline-identifiers and any +associated labels (e.g. anomaly labels) in the raw log data to make it compatible to openTelemetry data. Additionally it +also provides customised filtering of specific regular expression patterns (like IP addresses or memory locations or file +paths) that are deemed irrelevant for the actual log analysis. +10 + +Cheng et. al +Figure 6: Example of Log Parsing +ii)Log Parsing: To enable downstream processing, un- +structured log messages first need to be parsed into a +structured event template (i.e. constant part that was ac- +tually designed by the developers) and parameters (i.e. +variable part which contain the dynamic runtime informa- +tion). Figure 6 provides one such example of parsing a +logline. 
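As a simple illustration of the idea (separate from the parsers LogAI actually ships), the naive sketch below aligns loglines token by token and replaces the positions that vary with a "<*>" wildcard, yielding a template plus per-line parameter lists. The first logline follows the HDFS example of Figure 6; the second is a synthetic companion added only for illustration.

# Naive illustration of template extraction: tokens that differ across
# loglines of the same shape become "<*>" wildcards; the differing values
# are collected as parameters. Real parsers (Drain, IPLoM, AEL) are more
# sophisticated, but the input/output contract is the same.
def extract_template(loglines):
    token_lists = [line.split() for line in loglines]
    template, parameters = [], [[] for _ in loglines]
    for tokens in zip(*token_lists):
        if len(set(tokens)) == 1:          # constant part of the message
            template.append(tokens[0])
        else:                              # variable (runtime) part
            template.append("<*>")
            for i, tok in enumerate(tokens):
                parameters[i].append(tok)
    return " ".join(template), parameters

lines = [
    "Received block blk_3587508140051953248 of size 67108864 from /10.251.42.84",
    "Received block blk_8229193803249955061 of size 67108864 from /10.251.71.16",
]
template, params = extract_template(lines)
print(template)   # Received block <*> of size 67108864 from <*>
print(params)     # [['blk_3587508140051953248', '/10.251.42.84'], ...]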
In LogAI library we provide three popular log +parsers which use heuristic-based techniques - Drain [30], +IPLoM [31] and AEL [32]. +iii) Log Partitioning: After parsing the next step is to +partition the log data into groups, based on some seman- +tics where each group represents a finite chunk of log lines or log sequences. The main purpose behind this is to +decompose the original log dump, which typically consists of millions of log lines into logical chunks, so as to enable +explicit modeling on these chunks and allow the models to capture anomaly patterns over sequences of log templates or +log parameter values or both. In literature, various Log partitioning techniques have been applied [27, 33]. In LogAI we +provide different schemes like - Fixed or Sliding window based partitions, where the length of window is determined by +length of log sequence or a period of time, and Identifier based partitions where logs are partitioned based on some +identifier (e.g. the session or process they originate from). Figure 7 illustrates these different choices of log grouping +and partitioning. A log event is eventually deemed to be anomalous or not, either at the level of a log line or a log +partition. +Figure 7: Different types of log partition- +ing +iv) Log Vectorization: After log partitioning, the next step is to represent +each partition in a machine-readable way (e.g. a vector or a matrix) by +extracting features from them. This can be done in various ways [34, 33]. +In LogAI we provide the following vectorization techniques - +• i) sequential representation which converts each partition to an ordered +sequence of log event ids +• ii) quantitative representation which uses count vectors, weighted by the +term and inverse document frequency information of the log events +• iii) semantic representation captures the linguistic meaning from the se- +quence of language tokens in the log events and learns a high-dimensional +embedding vector for each token in the dataset. +The nature of log representation chosen has direct consequence in terms of +which patterns of anomalies they can support - for example, for capturing +keyword based anomalies, semantic representation might be key, while for anomalies related to template count and +variable distribution, quantitative representations are possibly more appropriate. The semantic embedding vectors +themselves can be either obtained using pretrained neural language models like GloVe, FastText, pretrained Transformer +like BERT, RoBERTa etc. Or they can also be learnt from scratch on the available training data, by building custom +vocabulary and using these neural language models. +v) Log Anomaly Detection Models for benchmarking: The task of log based anomaly detection is to analyze a dump +of log data, consisting of a series of timestamped log lines and identify the anomalous log lines that are potentially +incident-indicating. Based on the kind of application, log anomaly signals can either be used to detect or localize an +already occurred incident or disruption or used to forecast future potential faults or failures. In literature, log based +anomaly detection models have been broadly categorized into two types - Supervised and Unsupervised, based on the +kind of training framework they follow. Since our objective is to benchmark only neural models, we limit our discussion +in this section to this class of models alone. 
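Before turning to the models themselves, the short sketch below makes the three feature representations from step iv) concrete on a toy partition of parsed log events. It uses plain Python and scikit-learn with assumed toy data; it is not LogAI API code, and random vectors stand in for pretrained embeddings.

# Toy illustration of the three log-partition representations discussed above:
# sequential (ordered event ids), quantitative (TF-IDF-weighted counts) and
# semantic (embedding the tokens of each template). Not LogAI API code.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

# One log partition, already parsed into event templates.
partition = [
    "Received block <*> of size <*> from <*>",
    "PacketResponder <*> for block <*> terminating",
    "Received block <*> of size <*> from <*>",
]

# 1) Sequential representation: the ordered sequence of event ids.
event_ids = {t: i for i, t in enumerate(dict.fromkeys(partition))}
sequential = [event_ids[t] for t in partition]          # e.g. [0, 1, 0]

# 2) Quantitative representation: TF-IDF-weighted token counts for the partition.
tfidf = TfidfVectorizer(token_pattern=r"\S+")
quantitative = tfidf.fit_transform([" ".join(partition)]).toarray()[0]

# 3) Semantic representation: average of per-token embedding vectors
#    (random vectors stand in for Word2Vec/FastText/BERT embeddings here).
rng = np.random.default_rng(0)
vocab = {tok for t in partition for tok in t.split()}
embedding = {tok: rng.normal(size=16) for tok in vocab}
semantic = np.mean([embedding[tok] for t in partition for tok in t.split()], axis=0)

print(sequential, quantitative.shape, semantic.shape)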
Supervised Anomaly Detection models require the anomaly label to be available at the level of each log line or each log group or partition. Furthermore, they typically assume that the training, development and test data each contain a mix of anomalous and non-anomalous log data. These models use supervised losses like the cross-entropy loss or squared error loss. However, they can suffer from the under-representation of the anomalous class of logs, especially if anomalies occur very rarely in the training and development data. Due to their direct dependency on modeling the anomalous class explicitly, these models also lack robustness when the anomaly distribution changes.
Unsupervised Anomaly Detection models do not require any anomaly label for the log data. However, the existing unsupervised models in the literature typically assume that the entire training data is comprised of only normal or non-anomalous logs, and they generally show a sharp decline in performance when the training data is adulterated with even a small fraction of anomalous logs. Amongst the most popular unsupervised anomaly detection models, mainly two paradigms have been followed:
• Forecasting based models: These models learn representations of the log lines through forecasting based self-supervision, i.e. by learning to predict the label of the next log line given an input context of a log sequence (a minimal sketch of this scheme is given after this list). For all of these models, following the Deep-Loglizer paper, the label is taken to be the event id of the next log line. This category includes the various sequence encoding networks that have been popular in deep learning - recurrent neural network or convolutional neural network based models, as well as the more recent and more powerful self-attention based Transformer models. These models are typically trained with a cross-entropy loss between the true and predicted distributions, which aims to maximize the likelihood of the true label conditioned on the given input sequence.
• Reconstruction based models: This category includes auto-encoder based models, which try to reconstruct a given sequence of loglines through a learnable hidden layer that learns an n-dimensional representation of each log line. The other, more recent models in this category are Transformer based models trained using masked language modeling principles. During training, a certain fraction of the input tokens is masked and the model learns to predict these tokens using the remaining input context, in the process learning a contextual representation of each token in a log line or log sequence. This is the fundamental principle behind the BERT language model, with masked language modeling providing the learning objective when training on the log data in a self-supervised way.
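The following is a minimal PyTorch-style sketch of the forecasting-based scheme: an LSTM predicts the id of the next log event from a window of preceding event ids, and at inference time a sequence is flagged as anomalous if the observed next event is not among the model's top-k predictions. Model sizes, window length and k are illustrative assumptions, not the settings used in our benchmark.

# Minimal sketch of forecasting-based log anomaly detection: predict the next
# log event id from a window of previous ids; flag an anomaly if the observed
# next event is not in the model's top-k predictions. Hyperparameters are
# illustrative only.
import torch
import torch.nn as nn

class NextEventLSTM(nn.Module):
    def __init__(self, num_events: int, emb_dim: int = 32, hidden: int = 64):
        super().__init__()
        self.embed = nn.Embedding(num_events, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden, batch_first=True)
        self.head = nn.Linear(hidden, num_events)

    def forward(self, windows):                 # windows: (batch, window_len)
        out, _ = self.lstm(self.embed(windows))
        return self.head(out[:, -1, :])         # logits over the next event id

def is_anomalous(model, window, next_event, k=10):
    """Unsupervised inference rule: anomaly if the ground-truth next event
    is not among the k most probable events predicted by the model."""
    with torch.no_grad():
        logits = model(window.unsqueeze(0))
        topk = torch.topk(logits, k, dim=-1).indices[0]
    return next_event not in topk

# Training uses cross-entropy between the predicted logits and the true next id:
# loss = nn.CrossEntropyLoss()(model(windows), next_ids)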
+Forecasting based Anomaly Detection: For our benchmarking with forecasting based models, we select three core +deep learning models which have been the basis of the some of the most popular recent neural log anomaly detection +methods +• LSTM: This model corresponds to a long-short term memory (LSTM) network to encode a given log sequence. It +also provides various options - i) whether to utilize uni-directional or bi-directional encoding of tokens in a given +input sequence ii) whether to have a learnable attention network over the input sequence, which linearly combines the +hidden representations with the attention weights. +• CNN: This model corresponds to a convolutional neural network (CNN) to encode a given log sequence. Different +convolutional layers with different shape settings are applied on the input followed by a 1-d max-pooling operation. +The outputs from each of these are then concatenated and fed into a fully-connected layer. +• Transformer: This model corresponds to a Transformer based encoder network with a multi-headed self-attention +mechanism to encode a given log sequence. Since the Transformer outputs a d-dimensional representation for +each token in the input log-sequence, a mean-pooling operation is applied over those representations, to get a fixed +representation for the entire sequence. +Since the LSTM, CNN and Transformer models need a d-dimensional representation of each log, first an embedding +layer is applied to the raw log input features. In case of sequential feature representation, each log event id is embedded +as a d-dimensional vector, while for semantic feature representation, the embedding layer is initialized with the +pretrained embeddings (e.g. Word2Vec or FastText etc) and embeds each log token id to a d-dimensional vector. +The output of the LSTM, CNN or Transformer a fixed d-dimensional representation of the input sequence which is +then downward projected to 1-d space, followed by a softmax layer. For supervised versions of these models, since the +explicit label (anomalous or not) exists for each log-line or log-sequence, the output of the softmax layer is aimed to +directly predict this label. For forecasting based unsupervised versions, the output of the softmax layer is aimed to +predict the id of the next log-line, that is succeeding the given input log sequence. During inference, for forecasting +based unsupervised models make a prediction for a given input log sequence, which is then compared against the actual +log event following the input sequence. We follow the similar inference strategy as [3] and predict a test instance as +anomalous if the ground truth is not one of the k (=10) most probable log events predicted by the model. A smaller k +imposes more demanding requirements on the model’s performance. +In literature, LSTM based models have been used by DeepLog [35], LogAnomaly [34] and LogRobust [36]. While +DeepLog uses sequential representations, where each log message is represented by the index of its log event, +LogAnomaly uses semantic representations. While both of these use unidirectional LSTM in an unsupervised setting, +LogRobust uses supervised version of an bi-directional LSTM with the attention network. CNN has been used by [37] +but only in a supervised setting. Transformer based model has been applied in LogSy [38], but they additionally use +auxiliary log datasets as pseudo-anomalous data. 
This helps them to learn a better representation of the normal log data from the target system of interest while regularizing against overfitting. In order to ensure better reproducibility, we do not use any additional log datasets in our benchmarking; hence, in some of the supervised settings our Transformer based models suffer from overfitting issues, yield somewhat poorer results, and are not directly comparable to the results obtained by [37]. Following [3], for all of these models we report the F1-Score in both the supervised and unsupervised settings.

Reconstruction based Anomaly Detection: For our benchmarking with reconstruction based models, we select the LogBERT model from the LanoBERT work [39]. Following that work, the following preprocessing configurations are set before the BERT model is applied: i) since LogBERT is a parser-free technique, no log parsing is applied; ii) to obtain the vectorized log representation, the preprocessed log sequences are tokenized using the WordPiece model (Wu et al. 2016) used in BERT; iii) the tokenizer is trained from scratch on each log dataset to ensure that a dataset-specific custom vocabulary can be learned. During training, the usual masked language modeling principles of BERT are followed. During inference, multiple masked versions of each test instance are generated by passing a fixed-size masking window over the token sequence, skipping the masking of special characters. Thus a test instance of sequence length N results on average in N/n masked instances, each having a masked n-gram of length up to n. After running inference on the masked test instances, the anomaly score is obtained as the average of the top-prediction probabilities (or log-probabilities) over the k most confident masked tokens. Following LanoBERT, we report the AUROC (Area Under the ROC curve) metric over this anomaly score.

All unsupervised models (forecasting or reconstruction based) are trained only on normal log data. Following Deep-Loglizer, around 20% of the data is sequestered for testing for the forecasting based models, and following LanoBERT, around 30% is sequestered for LogBERT. These percentages include the entire set of anomalous logs in the dataset. In LogAI, we additionally take out 10% of the training data as a development set for validation and model selection purposes.

5.3 Datasets:
Following Deep-Loglizer and LanoBERT, we perform our benchmarking experiments on two of the most popular public log anomaly detection datasets - HDFS and BGL. Additionally, for LogBERT we also benchmark on the public Thunderbird dataset. Further, similar to Deep-Loglizer, for the BGL dataset we perform fixed-window log partitioning by grouping log-lines over every 6-hour window. For the LogBERT model, however, following LanoBERT, we treat each individual log-line as a training or test instance, without any log partitioning. On the other hand, for the HDFS dataset, since anomaly labels are available only at the level of each session id (also known as BLOCK in the raw dataset), we use identifier based log partitioning, constructing a log-sequence for each session id. The resulting log partitions are treated as the training or test instances for all algorithms; a minimal sketch of the two partitioning strategies is shown below.
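The following is a small pandas sketch of fixed-window and identifier based partitioning. It is not the LogAI API; the column names (timestamp, event_id, block_id) and the example values are hypothetical and only serve to illustrate the grouping logic.

```python
import pandas as pd

def fixed_time_partitions(logs: pd.DataFrame, hours: int = 6) -> pd.Series:
    """Group loglines into fixed, non-overlapping time windows (e.g. 6-hour windows for BGL)."""
    window = logs["timestamp"].dt.floor(f"{hours}H")
    return logs.groupby(window)["event_id"].apply(list)

def identifier_partitions(logs: pd.DataFrame, id_col: str = "block_id") -> pd.Series:
    """Group loglines by a session/block identifier (e.g. the BLOCK id for HDFS)."""
    return logs.groupby(id_col)["event_id"].apply(list)

# Each resulting partition (a sequence of event ids) becomes one training or test instance.
logs = pd.DataFrame({
    "timestamp": pd.to_datetime(["2008-11-09 20:46", "2008-11-09 21:10", "2008-11-10 03:05"]),
    "event_id": [3, 7, 3],
    "block_id": ["blk_1", "blk_1", "blk_2"],
})
print(fixed_time_partitions(logs))   # two 6-hour windows
print(identifier_partitions(logs))   # two block identifiers
```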
5.4 Experimental Settings and Results:
For our benchmarking, we conduct experiments on the above choice of anomaly detection algorithms under various settings and compare our experimental results with those published in the Deep-Loglizer [3] and LanoBERT [39] papers. In Table 3 we list the performance of the different supervised and unsupervised forecasting-based models (LSTM, CNN and Transformer), while Table 4 shows the results using the unsupervised reconstruction-based LogBERT model.
Evaluation Metrics: To compare performance, for all supervised and unsupervised forecasting-based models we use the F1-Score as the metric, following the Deep-Loglizer paper, whereas for LogBERT, following the LanoBERT paper, we report the AUROC metric. The LanoBERT paper also provides F1-Scores, but computing the F1-Score requires fixing a threshold, which is challenging to do over training data that contains only normal logs. According to the paper, their reported scores are the best F1 values calculated using the threshold that yields the best performance on the test dataset. This is not a fair metric, as it involves label knowledge of the blind test set, and hence we compare only using the AUROC metric.
Configuration Settings for Evaluation: For each of the LSTM and Transformer models, we benchmark 8 different configuration settings per dataset, based on the kind of supervision (supervised or unsupervised), whether log parsing is applied or not, and whether the log representation is sequential or semantic. For the CNN models, we found that the semantic log representation results in a very slow convergence rate, hence we benchmark them using only the sequential feature representation of the logs. Deep-Loglizer, on the other hand, showcases only specific settings for these models - e.g. forecasting based unsupervised anomaly detection is done using a unidirectional LSTM without attention and a Transformer network, while the supervised models are a bidirectional LSTM with attention and a CNN network - even though all of these methods can be applied in both supervised and unsupervised settings. Each of their models uses the log parsing step and has two variants that use sequential and semantic feature representations of the logs. Consequently, the Deep-Loglizer paper [3] provides only 8 configurations per dataset, whereas LogAI is benchmarked on a more exhaustive set of 20 configurations per dataset.
Performance Comparison: In most of these configurations the performance achieved by LogAI is comparable to that of Deep-Loglizer. The 2-3% difference in performance between the models is not quite statistically significant and can
mostly be attributed to the following factors. First, following the implementation open-sourced by the authors of Deep-Loglizer at https://github.com/logpai/deep-loglizer, it is evident that their library does not utilize any development (or validation) set and directly performs model selection based on the test performance. LogAI, on the other hand, selects the model checkpoint based on the validation performance and reports results on the blind test set. Secondly, for the same reason, the resulting training and test splits used by LogAI and Deep-Loglizer are not identical. For the BGL data in particular, the performance difference is somewhat more observable, since both libraries apply fixed 6-hour time-partitions and report the evaluation at the level of the partitions instead of at the logline level. This also adds to the possibility of more significant differences in the training/test data setup between the two libraries.
For Transformer based models, especially in the supervised setting, we observe reduced performance. A similar effect was studied in the original work [38], which used a Transformer model as a log anomaly detector in the supervised setting. Their model suffered from overfitting on the target system's log data due to the presence of only rare and sparse anomaly patterns in the training data. To overcome the overfitting issue, they additionally involve other external systems' logs as auxiliary data, treating them as pseudo "anomalous" logs. But in order to keep our benchmarking reproducible, we do not use any additional auxiliary data and subsequently report poorer performance. The Deep-Loglizer paper also benchmarks only the unsupervised setting of the Transformer model, which is much less prone to overfitting.

Model | Details | Supervision | Log Parsing | Log Representation | HDFS LogAI | HDFS Deep-Loglizer | BGL LogAI | BGL Deep-Loglizer
----- | ------- | ----------- | ----------- | ------------------ | ---------- | ------------------ | --------- | -----------------
LSTM | Unidirectional, No Attention | Unsupervised | ✓ | sequential | 0.981 | 0.944 | 0.938 | 0.961
LSTM | Unidirectional, No Attention | Unsupervised | ✓ | semantic | 0.981 | 0.945 | 0.924 | 0.967
LSTM | Unidirectional, No Attention | Unsupervised | ✗ | sequential | 0.979 | - | 0.925 | -
LSTM | Unidirectional, No Attention | Unsupervised | ✗ | semantic | 0.981 | - | 0.924 | -
LSTM | Bidirectional, With Attention | Supervised | ✓ | sequential | 0.984 | 0.96 | 0.983 | 0.983
LSTM | Bidirectional, With Attention | Supervised | ✓ | semantic | 0.964 | 0.964 | 0.95 | 0.983
LSTM | Bidirectional, With Attention | Supervised | ✗ | sequential | 0.989 | - | 0.931 | -
LSTM | Bidirectional, With Attention | Supervised | ✗ | semantic | 0.971 | - | 0.983 | -
CNN | 2-D Convolution with 1-D Max pooling | Unsupervised | ✓ | sequential | 0.981 | - | 0.929 | -
CNN | 2-D Convolution with 1-D Max pooling | Unsupervised | ✗ | sequential | 0.981 | - | 0.922 | -
CNN | 2-D Convolution with 1-D Max pooling | Supervised | ✓ | sequential | 0.943 | 0.97 | 0.983 | 0.972
CNN | 2-D Convolution with 1-D Max pooling | Supervised | ✗ | sequential | 0.946 | - | 0.990 | -
Transformer | Multihead single-layer self-attention model, trained from scratch | Unsupervised | ✓ | sequential | 0.971 | 0.905 | 0.933 | 0.956
Transformer | Multihead single-layer self-attention model, trained from scratch | Unsupervised | ✓ | semantic | 0.978 | 0.925 | 0.921 | 0.957
Transformer | Multihead single-layer self-attention model, trained from scratch | Unsupervised | ✗ | sequential | 0.98 | - | 0.92 | -
Transformer | Multihead single-layer self-attention model, trained from scratch | Unsupervised | ✗ | semantic | 0.975 | - | 0.917 | -
Transformer | Multihead single-layer self-attention model, trained from scratch | Supervised | ✓ | sequential | 0.934 | - | 0.986 | -
Transformer | Multihead single-layer self-attention model, trained from scratch | Supervised | ✓ | semantic | 0.784 | - | 0.963 | -
Transformer | Multihead single-layer self-attention model, trained from scratch | Supervised | ✗ | sequential | 0.945 | - | 0.994 | -
Transformer | Multihead single-layer self-attention model, trained from scratch | Supervised | ✗ | semantic | 0.915 | - | 0.977 | -

Table 3: Comparison between different supervised and unsupervised forecasting-based neural anomaly detection models in LogAI and the Deep-Loglizer library [3], using F1-Score as the performance metric. The ✓/✗ in the Log Parsing column indicates whether log parsing is applied. The dashed (-) cells indicate that there are no reported numbers in the Deep-Loglizer paper corresponding to those configurations.

For the LogBERT model, we benchmark the test results using various inferencing strategies.
Given a test instance which has been converted to multiple masked versions (each having a continuous n-gram masked), we average the inference score either over all the masked tokens or over the top-6 most confident ones, based on the model's prediction likelihood. For the latter, we consider different inference scores - the mean predictive loss, the maximum predictive probability or log-probability, and the entropy of the prediction distribution. All of these metrics are quite correlated, and our objective is simply to show that our LogBERT implementation yields reasonably stable results across these different inferencing strategies. While LanoBERT also uses predictive-loss and probability based scores, it provides the AUROC evaluation metric only for the latter, and it evaluates only the HDFS and BGL datasets. In the predictive probability based inference strategy, the results obtained by LogAI and LanoBERT are quite comparable, with small differences owing to the variability of the train/test splits used in the two implementations (the authors of LanoBERT used their own train/test split due to the general lack of standardized data splits for these datasets).

Masked tokens used | Inference Score | HDFS LogAI | HDFS LanoBERT | BGL LogAI | BGL LanoBERT | Thunderbird LogAI | Thunderbird LanoBERT
------------------ | --------------- | ---------- | ------------- | --------- | ------------ | ----------------- | --------------------
All masked tokens | Mean Predictive Loss | 0.983 | - | 0.998 | - | 0.953 | -
Top-6 most-confident masked tokens | Mean Predictive Loss | 0.98 | - | 0.964 | - | 0.937 | -
Top-6 most-confident masked tokens | Max Predictive Prob. | 0.976 | 0.99 | 0.972 | 0.972 | 0.953 | -
Top-6 most-confident masked tokens | Max Predictive LogProb. | 0.976 | - | 0.969 | - | 0.917 | -
Top-6 most-confident masked tokens | Mean Predictive Entropy | 0.976 | - | 0.973 | - | 0.967 | -

Table 4: Comparison of the LogBERT model performance achieved by our LogAI library and by LanoBERT [39], using the AUROC metric. Both versions of the model are in the unsupervised setting (trained on normal logs only) and do not need any log parsing. The dashed (-) cells indicate that there are no reported numbers in the LanoBERT paper corresponding to those configurations.

Overall, our experiments on this suite of deep learning based log anomaly detection models suggest that their implementations in the LogAI library are able to reproduce the established performance benchmarks on standard open-source datasets with reasonable accuracy. Additionally, owing to a more generic data processing pipeline, we are seamlessly able to extend to a more exhaustive set of experimental settings than what has been explored or implemented before in the existing literature and libraries.

6 Conclusion
In this technical report we introduced LogAI, an open source library for AI-based log analytics and intelligence. The LogAI library uses the same unified log data model as OpenTelemetry to ensure that its analytical processes are agnostic to any log platform that supports OpenTelemetry. LogAI also abstracts the common processes of different downstream tasks and provides reusable components to execute these processes. Moreover, LogAI provides a large variety of AI capabilities, from time-series analysis and traditional statistical learning to deep learning and pre-trained Transformer models. We showed how LogAI can be used to conduct a variety of common log analysis tasks such as log summarization, clustering and anomaly detection, and we also provided extensive benchmarking results on log anomaly detection. LogAI version v0.1.0 is released as open-source code under the BSD-3-Clause license. Our team will provide continuous support and further improvements in future versions.
Acknowledgments
We would like to thank a number of leaders and colleagues from Salesforce.com Inc. who have provided strong support, advice, and contributions to this open-source project.
References
[1] The Business Research Company. Log Management Global Market Report. 2023.
[2] Shilin He, Jieming Zhu, Pinjia He, and Michael R. Lyu. Experience report: System log analysis for anomaly detection. In 2016 IEEE 27th International Symposium on Software Reliability Engineering (ISSRE), pages 207–218, 2016.
[3] Zhuangbin Chen, Jinyang Liu, Wenwei Gu, Yuxin Su, and Michael R. Lyu. Experience report: Deep learning-based system log analysis for anomaly detection. CoRR, abs/2107.05908, 2021.
[4] Jiang Zhaoxue, Li Tong, Zhang Zhenguo, Ge Jingguo, You Junling, and Li Liangxiong. A survey on log research of aiops: Methods and trends. Mobile Networks and Applications, pages 1–12, 2022.
[5] Shilin He, Jieming Zhu, Pinjia He, and Michael R. Lyu. Experience report: System log analysis for anomaly detection. In 27th IEEE International Symposium on Software Reliability Engineering, ISSRE 2016, Ottawa, ON, Canada, October 23-27, 2016, pages 207–218. IEEE Computer Society, 2016.
[6] Zhuangbin Chen, Jinyang Liu, Wenwei Gu, Yuxin Su, and Michael R. Lyu. Experience report: Deep learning-based system log analysis for anomaly detection, 2021.
[7] Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R Lyu. Drain: An online log parsing approach with fixed depth tree. In 2017 IEEE international conference on web services (ICWS), pages 33–40. IEEE, 2017.
[8] Adetokunbo AO Makanju, A Nur Zincir-Heywood, and Evangelos E Milios. Clustering event logs using iterative partitioning. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining, pages 1255–1264, 2009.
[9] Zhen Ming Jiang, Ahmed E Hassan, Gilbert Hamann, and Parminder Flora. An automated approach for abstracting execution logs to execution events. Journal of Software Maintenance and Evolution: Research and Practice, 20(4):249–267, 2008.
[10] Juan Ramos et al. Using tf-idf to determine word relevance in document queries. In Proceedings of the first instructional conference on machine learning, volume 242, pages 29–48. Citeseer, 2003.
[11] Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. Enriching word vectors with subword information. Transactions of the association for computational linguistics, 5:135–146, 2017.
[12] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781, 2013.
[13] Aadyot Bhatnagar, Paul Kassianik, Chenghao Liu, Tian Lan, Wenzhuo Yang, Rowan Cassius, Doyen Sahoo, Devansh Arpit, Sri Subramanian, Gerald Woo, Amrita Saha, Arun Kumar Jagota, Gokulakrishnan Gopalakrishnan, Manpreet Singh, K C Krithika, Sukumar Maddineni, Daeki Cho, Bo Zong, Yingbo Zhou, Caiming Xiong, Silvio Savarese, Steven Hoi, and Huan Wang. Merlion: A machine learning library for time series. 2021.
[14] Bernhard Schölkopf, John C. Platt, John C. Shawe-Taylor, Alex J. Smola, and Robert C. Williamson. Estimating the support of a high-dimensional distribution. Neural Comput., 13(7):1443–1471, jul 2001.
[15] Markus M. Breunig, Hans-Peter Kriegel, Raymond T. Ng, and Jörg Sander. Lof: Identifying density-based local outliers. In Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data, SIGMOD ’00, page 93–104, New York, NY, USA, 2000.
Association for Computing Machinery.
[16] F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825–2830, 2011.
[17] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9(8):1735–1780, 1997.
[18] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS’17, page 6000–6010, Red Hook, NY, USA, 2017. Curran Associates Inc.
[19] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4171–4186. Association for Computational Linguistics, 2019.
[20] D. Sculley. Web-scale k-means clustering. In Proceedings of the 19th International Conference on World Wide Web, WWW ’10, page 1177–1178, New York, NY, USA, 2010. Association for Computing Machinery.
[21] Erich Schubert, Jörg Sander, Martin Ester, Hans Peter Kriegel, and Xiaowei Xu. Dbscan revisited, revisited: Why and how you should (still) use dbscan. ACM Trans. Database Syst., 42(3), jul 2017.
[22] Jiang Zhaoxue, Li Tong, Zhang Zhenguo, Ge Jingguo, You Junling, and Li Liangxiong. A survey on log research of aiops: Methods and trends. Mob. Netw. Appl., 26(6):2353–2364, dec 2021.
[23] Shilin He, Pinjia He, Zhuangbin Chen, Tianyi Yang, Yuxin Su, and Michael R. Lyu. A survey on automated log analysis for reliability engineering. ACM Comput. Surv., 54(6), jul 2021.
[24] Paolo Notaro, Jorge Cardoso, and Michael Gerndt. A survey of aiops methods for failure management. ACM Trans. Intell. Syst. Technol., 12(6), nov 2021.
[25] Xiao Han and Shuhan Yuan. Unsupervised cross-system log anomaly detection via domain adaptation. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, CIKM ’21, page 3068–3072, New York, NY, USA, 2021. Association for Computing Machinery.
[26] Van-Hoang Le and Hongyu Zhang. Log-based anomaly detection with deep learning: How far are we? In Proceedings of the 44th International Conference on Software Engineering, ICSE ’22, page 1356–1367, New York, NY, USA, 2022. Association for Computing Machinery.
[27] Nengwen Zhao, Honglin Wang, Zeyan Li, Xiao Peng, Gang Wang, Zhu Pan, Yong Wu, Zhen Feng, Xidao Wen, Wenchi Zhang, Kaixin Sui, and Dan Pei. An empirical investigation of practical log anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering, ESEC/FSE 2021, page 1404–1415, New York, NY, USA, 2021. Association for Computing Machinery.
[28] Yichen Zhu, Weibin Meng, Ying Liu, Shenglin Zhang, Tao Han, Shimin Tao, and Dan Pei. Unilog: Deploy one model and specialize it for all log analysis tasks. CoRR, abs/2112.03159, 2021.
[29] Jacopo Soldani and Antonio Brogi.
Anomaly detection and failure root cause analysis in (micro) service-based +cloud applications: A survey. ACM Comput. Surv., 55(3), feb 2022. +[30] Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R. Lyu. Drain: An online log parsing approach with fixed +depth tree. In 2017 IEEE International Conference on Web Services (ICWS), pages 33–40, 2017. +[31] Adetokunbo A.O. Makanju, A. Nur Zincir-Heywood, and Evangelos E. Milios. Clustering event logs using +iterative partitioning. In Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery +and Data Mining, KDD ’09, page 1255–1264, New York, NY, USA, 2009. Association for Computing Machinery. +[32] Zhen Ming Jiang, Ahmed E. Hassan, Parminder Flora, and Gilbert Hamann. Abstracting execution logs to +execution events for enterprise applications (short paper). In 2008 The Eighth International Conference on Quality +Software, pages 181–186, 2008. +[33] Mostafa Farshchi, Jean-Guy Schneider, Ingo Weber, and John Grundy. Experience report: Anomaly detection of +cloud application operations using log and cloud metric correlation analysis. In 2015 IEEE 26th International +Symposium on Software Reliability Engineering (ISSRE), pages 24–34, 2015. +[34] Weibin Meng, Ying Liu, Yichen Zhu, Shenglin Zhang, Dan Pei, Yuqing Liu, Yihao Chen, Ruizhi Zhang, Shimin +Tao, Pei Sun, and Rong Zhou. Loganomaly: Unsupervised detection of sequential and quantitative anomalies in +unstructured logs. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, IJCAI’19, +page 4739–4745. AAAI Press, 2019. +[35] Min Du, Feifei Li, Guineng Zheng, and Vivek Srikumar. Deeplog: Anomaly detection and diagnosis from +system logs through deep learning. In Proceedings of the 2017 ACM SIGSAC Conference on Computer and +Communications Security, CCS ’17, page 1285–1298, New York, NY, USA, 2017. Association for Computing +Machinery. +[36] Xu Zhang, Yong Xu, Qingwei Lin, Bo Qiao, Hongyu Zhang, Yingnong Dang, Chunyu Xie, Xinsheng Yang, Qian +Cheng, Ze Li, Junjie Chen, Xiaoting He, Randolph Yao, Jian-Guang Lou, Murali Chintalapati, Furao Shen, and +Dongmei Zhang. Robust log-based anomaly detection on unstable log data. In Proceedings of the 2019 27th ACM +Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software +Engineering, ESEC/FSE 2019, page 807–817, New York, NY, USA, 2019. Association for Computing Machinery. +[37] Siyang Lu, Xiang Wei, Yandong Li, and Liqiang Wang. Detecting anomaly in big data system logs using +convolutional neural network. In 2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, +16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and +Cyber Science and Technology Congress, DASC/PiCom/DataCom/CyberSciTech 2018, Athens, Greece, August +12-15, 2018, pages 151–158. IEEE Computer Society, 2018. +[38] Sasho Nedelkoski, Jasmin Bogatinovski, Alexander Acker, Jorge Cardoso, and Odej Kao. +Self-attentive +classification-based anomaly detection in unstructured logs. In 2020 IEEE International Conference on Data +Mining (ICDM), pages 1196–1201, 2020. +[39] Yukyung Lee, Jina Kim, and Pilsung Kang. Lanobert : System log anomaly detection based on BERT masked +language model. CoRR, abs/2111.09564, 2021. 
+17 + diff --git a/0dFQT4oBgHgl3EQf0Daw/content/tmp_files/load_file.txt b/0dFQT4oBgHgl3EQf0Daw/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..8a73ded27fb1b86fd09497b1e3810b1e6edb6047 --- /dev/null +++ b/0dFQT4oBgHgl3EQf0Daw/content/tmp_files/load_file.txt @@ -0,0 +1,853 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf,len=852 +page_content='LOGAI: A LIBRARY FOR LOG ANALYTICS AND INTELLIGENCE Qian Cheng, Amrita Saha, Wenzhuo Yang, Chenghao Liu, Doyen Sahoo, Steven Hoi Salesforce AI Research {qcheng, amrita.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='saha, wenzhuo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='yang, chenghao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='liu, dsahoo, shoi}@salesforce.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='com ABSTRACT Software and System logs record runtime information about processes executing within a system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' These logs have become the most critical and ubiquitous forms of observability data that help developers understand system behavior, monitor system health and resolve issues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' However, the volume of logs generated can be humongous (of the order of petabytes per day) especially for complex distributed systems, such as cloud, search engine, social media, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' This has propelled a lot of research on developing AI-based log based analytics and intelligence solutions that can process huge volume of raw logs and generate insights.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In order to enable users to perform multiple types of AI-based log analysis tasks in a uniform manner, we introduce LogAI (https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='com/ salesforce/logai), a one-stop open source library for log analytics and intelligence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI supports tasks such as log summarization, log clustering and log anomaly detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' It adopts the OpenTelemetry data model, to enable compatibility with different log management platforms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI provides a unified model interface and provides popular time-series, statistical learning and deep learning models.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Alongside this, LogAI also provides an out-of-the-box GUI for users to conduct interactive analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' With LogAI, we can also easily benchmark popular deep learning algorithms for log anomaly detection without putting in redundant effort to process the logs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' We have opensourced LogAI to cater to a wide range of applications benefiting both academic research and industrial prototyping.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Keywords Log Analysis · Machine Learning · Anomaly Detection · Clustering · Artifical Intelligence · AIOps 1 Introduction System and Software logs are text messages that are embedded by software and application developers in the source code and are designed to carry useful runtime information about the process, which are typically dumped as raw log files, once the system starts executing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In modern computer systems, especially for large distributed systems that run complex software, such as search engines, social network websites, and cloud platforms, logs are one of the most critical observability data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Logs are widely used in a variety of operational tasks, covering use cases such as system availability, reliability and security.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In scenarios when users have no direct access to the physical servers, logs are often the ground truth about the systems and applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' As such, Log Management has become a very important task in the industrial landscape.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In fact, log management market size grew to $2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='29 billion in 2023, at a compound annual growth rate (CAGR) of 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='9%, according to the report from The Business [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Ideally, logs should be capturing the runtime information at a very granular level and stored permanently so that when any disruptive incident occurs, developers and operators can always look up the correct log file and inspect the log messages to debug what caused the incident.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In reality though, because of the colossal size of the log dumps, storing them permanently in the raw form is often impractical.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' This challenge can be mitigated with the help of large cloud-based logging systems such as AWS Cloudwatch and Microsoft Azure Logs where it is possible to even store the entire log data and retain them for a substantial period of time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Moreover, these logging systems also provide capabilities to help efficient log querying and visualization, enabling developers and operators to quickly access the log dumps or log streams of their software.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' With these capabilities, the main open question is, how to explore raw logs and find the right set of logs associated with an incident?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' followed by a more advanced one - Is there a way to automatically analyze the logs and tell if there are issues with a system, create incidents and provide additional insights?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='13415v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='AI] 31 Jan 2023 Cheng et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' al Depending on which operational stage logs are involved in, the goal of log analysis in that specific situation could be different.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Logs can be used for incident detection, where reliability engineers and developers need to continuously monitor the log streams in order to detect any unexpected behavior that might be indicative of an incident.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For post incident detection, log data can play a critical role in root-cause analysis, where operators examine the raw logs to identify the loglines that show anomalous patterns and thus localize the anomaly and eventually the root cause of the incident to a single service, component or module or a group of them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' The situation becomes even more complex in large distributed systems, where people (typically reliability engineers) who inspect the logs to resolve incidents may not necessarily be the same group of people (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' software and application developers) who write the logging statements in software code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In these situations, understanding even simple dump logs can take significant amount of time and effort, owing to the open-ended nature of the log data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Over the past decade there have been various effort targeted at developing both commercial and open-source software to cater to automated log analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Though, most of the initial work used either domain specific rules or heuristics, with the proliferation of AI and ML, more and more data-driven techniques have been adopted and popularized in this community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' However, most of the AI-driven effort has been applied in an isolated manner, focusing on specific log analysis tasks (like how to extract structure out of the raw logs or how to detect anomaly patterns in it).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' There is still an urgent need for bringing together all the AI, ML and NLP techniques to a unified platform that can cater to the entire suite of different log analysis tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Nevertheless, creating such a one-stop library to serve a diverse set of log-based analytics can be quite non-trivial, with some of the potential challenges being, as follows: Lack of unified log data model for log analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Different logs are in different formats and as a result analysis tools need to be customized for different log formats and schemas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' It is not easy to generalize analytical algorithms without a unified data model that can handle heterogenous forms of log data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Redundant effort in data preprocessing and information extraction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' The current status of log analytics in this community is that there is a lack of a consolidated pipeline for data preprocessing and information extraction across all log analysis models and tasks - i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' different log analysis algorithms have been implemented independently, with each adopting their own pipelines and workflows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For different tasks, or even different algorithms of the same task, developers need to implement multiple redundant preprocessing and information extraction process modules.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Difficulty in managing log analysis experiments and benchmarking.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Empirical benchmarking forms a critical part of research and applied science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In the existing literature, there is no unified workflow management mechanism to run log analysis benchmarking experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For example, while there has been some isolated pockets of deep learning research for log anomaly detection, it is quite challenging for other organizations or users to adopt them or reproduce their experimental results, due to the lack of a common unified framework for log analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In this inter-disciplinary community of AIOps, users may have different needs while working on log analysis in academic and industrial settings when they are in different roles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For example, 1) Machine learning researchers may need a hassle-free way to perform benchmarking experiments on public log datasets and reproduce the experimental results from peer research groups in order to develop new log analysis algorithms;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' 2) Industrial data scientists and AIOps practitioners may need an intuitive workflow to quickly experiment with existing log analysis algorithms on their own log data and select the best performing algorithm, hyperparameters and experimental configurations as their log analysis solution, and 3) Data and software engineers need to integrate the selected algorithm into production and deploy them in a smooth and efficient way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Unfortunately, we realize there is no existing open source toolkit that can satisfy all the above needs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' We are thus motivated to develop a holistic LogAI solution - a python library aimed for conducting AI-based log analytics and intelligence tasks to serve a variety of academic and industrial use-cases.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI (https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' com/salesforce/logai) provides a unified way to conduct various of log analysis tasks such as log summarization, clustering, anomaly detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI also provides a unified data model, inheriting from OpenTelemetry log data model, to handle logs in different formats.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI is also the first open source log analytics library that incorporate time-series algorithms, statistical learning algorithms and deep learning algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Moreover, LogAI implemented an out-of-the-box GUI portal to conduct log analysis in interactive way, more naturally align with the user experience of real-world log analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Besides, in this technical report we also demonstrate how to use LogAI to easily benchmark deep learning algorithms for log anomaly detection without any redundant effort in log preprocessing and cleaning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In this community, there are existing libraries like LogLizer and Deep-Loglizer [2, 3] which have consolidated some of the AI/ML effort for the log domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' However, they still suffer from a few limitations - for example lacking a unified data processing pipeline that is generic across all tasks or algorithms or catering to only anomaly detection as the log analysis task or covering only a 2 Cheng et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' al specific types of algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In Section 5, we elaborate on the limitations of these existing libraries and also show how LogAI provides a more intuitive framework for designing and managing the experimental settings while performing comparable to Deep-Loglizer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' 2 Related Work Recently, researchers and engineers have been working on a variety of problems about automated log analysis in academia and industry [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Based on the existing solutions, we can summarize a common workflow to conduct automated log analysis.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' The common workflow contains four steps: log collection, log cleaning and preprocessing, log information extraction and log analysis and intelligence applications, Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Log collection is the data loading step that collects logs from local log dump files or log management platforms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Log cleaning and preprocessing is the step to use predefined rules and domain knowledge to clean noisy log data, remove or replace known log templates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' This step usually does not involve any ML process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Log information extraction is the step where ML models are involved to extract information from log data, and feed the log representation or features to train ML models for analytics and intelligence application tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Log information extraction usually contains several steps like log parsing, partitioning, feature extraction, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' The final step, log analytics and intelligence, is to train ML models for a specific log downstream task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For example, log clustering and summarization are common log analytics tasks, while log based anomaly detection and root-cause analysis are common log intelligence tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Figure 1: Common Log Analytics and Intelligence Workflow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' The common workflow contains four steps: 1) log collection from local log files or log platforms, 2) log cleaning and preprocessing, 3) log information extraction and 4) log analytics tasks (such as clustering and summarization) and log intelligence tasks (such as anomaly detection and root-cause analysis).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Log analysis has a very long history and there are a lot of tools for log analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Almost all commercial log management software/SaaS have associated log analysis/ log insights offerings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' This includes log management products such as Splunk, DataDog, NewRelic, etc.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=', as well as cloud providers such as Amazon AWS, Microsoft Azure and Google Cloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In open source community, there are also very popular log management and analysis projects such as GreyLogs, Grafana, Prometheus, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' However, neither these commercial log management platform nor open-source log analysis tools are incorporated with comprehensive AI techniques such as deep learning, large language models (LLM), BERT, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Meanwhile, there are a few open-source AI-based log analysis tools that started to support more comprehensive AI techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For example, LOGPAI (https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='com/logpai/) is one of the most famous log anaysis community on GitHub.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LOGPAI provides logparser for automated log parsing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LOGPAI also provides loglizer [5] and deep-loglizer [6] for log anomaly detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Besides LOGPAI, there are other open-source projects, most of which are open source code from research outcomes, such as LogClass and Log2Vec from NetManAIOps (https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='com/orgs/NetManAIOps).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' 3 Design Principles In this section we discuss about the design principles of LogAI library.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI provides a unified framework for log analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In order to achieve this, LogAI follows the following design principles: 1) high compatibility with data from different log sources, 2) reusable components to avoid reproducing effort, 3) unified setup process for customized applications and 4) easy-to-use GUI for out-of-box interactive log analysis.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' 3 Log Information Log Analytics and Log Collection Log Cleaning and Extraction Intelligence Preprocessing : From local files : Cleaning Noisy Data Log Parsing : Analytics: : From log platforms : Log Partitioning 。' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Clustering : Remove or Replace Custom Log .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Feature Extraction Summarization Templates : Intelligence : Anomaly Detection Rootcause AnalysisCheng et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' al 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='1 Compatible with data from different log sources One of the attractive qualities of log data is its open-ended form, where developers can design them to capture useful runtime and performance information to any arbitrary level of granularity as per the needs of the application.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Different software can generate very different logs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Even in the same software, there are different levels of logs, such as service logs, application logs, systems logs, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' These logs can be in different formats, either structured, semi-structured or unstructured.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI takes these factors into consideration and ensures that the data loader can consume and process these heterogeneous types of logs in a seamless way, by converting these logs into log record with unified log data model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='2 Reusable components to avoid duplicated effort As briefly motivated in Sec 1, a particular challenge of building log analytics in both academic and industrial settings, is the lack of an unified framework that allows reusal of data processing and information extraction components across different log analysis tasks, even on the same data source or dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For instance, engineers and researchers have to build separate pipelines to perform log anomaly detection, log clustering or summarization even to deal with the same log data source.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' This burden significantly impacts efficiency in every development stage.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' from experiments, prototyping all the way to productization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Also running multiple pipelines in production increases the system complexity and brings additional operational cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Thus, building a library that unifies the interface of common components across multiple downstream tasks is necessary to improve efficiency of all stages of log analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='3 Unified setup process for customized applications Even for the same application, the design choice behind the log analysis pipeline might have different variations, based on the various needs or limitations of the use-case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' For example, log anomaly detection may involve different steps in the end-to-end (E2E) workflow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Some may include log parsing, while others might choose to skip this step either due to the computational overhead or simply because the downstream analysis models do not need a well-defined parsed structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' Also, when converting the raw log text data to machine-readable vectors there can be various choices - either to convert log messages into time-series counter vectors or into event sequences by representing each log line as a id or as a sequence of natural language tokens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' In production setup, adding, removing or replacing a component in the E2E workflow could be very time consuming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' LogAI is designed to support building customized applications with easy plug-in / plug-out components, enabling users to quickly try out various combinations through simple intuitive mechanisms like configurable json or yaml files.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0dFQT4oBgHgl3EQf0Daw/content/2301.13415v1.pdf'} +page_content='4 Easy-to-use GUI for out-of-box interactive log analysis Another learning while we work with different types of log data is about visual examination.' 
3.4 Easy-to-use GUI for out-of-box interactive log analysis

Another learning from working with different types of log data is the importance of visual examination. Unlike many machine learning domains where model performance evaluation can rely heavily on metrics such as Precision, Recall and F-scores, log analysis tasks usually need more visual examination to validate the performance. Thus, LogAI is developed with a graphical user interface (GUI), or portal, that integrates interactive analytical features for tasks such as log summarization, clustering and anomaly detection. We believe this portal can reduce the cognitive overhead for LogAI users in onboarding to the library and help them execute log analysis tasks quickly and intuitively.

4 Architecture

LogAI is separated into a GUI module and a core library module. The GUI module contains the implementation of a GUI portal that talks to the backend analysis applications. The portal is supported using Plotly Dash (https://plotly.com/dash/). The core library module contains four main layers: the data layer, preprocessing layer, information extraction layer and analysis layer. Each layer contains the components to process logs in a standard way. LogAI applications, such as log summarization, log clustering and unsupervised log anomaly detection, are built on top of the components of these four layers.

4.1 Core Library Modules

LogAI is implemented in the architecture described in Figure 2. In this section we describe the technical details of each layer, including the implementation of the components and how the components communicate across layers.
Figure 2: LogAI Architecture

4.1.1 Data Layer

The data layer contains two component classes: the LogRecordObject class and the DataLoader class.

LogRecordObject defines the data model of log records. As mentioned in the Introduction, logs are free-form text and can be unstructured or semi-structured. Even for structured logs, different software applications may name their log data in different ways. LogRecordObject adapts log data from different sources into a more unified structure, providing a data object that can be used in all follow-up processes without modification. In LogAI, the data model of LogRecordObject is a subset of the log and event record definition of OpenTelemetry (https://opentelemetry.io/), containing the fields in Table 1.

Table 1: LogRecordObject Data Model

Timestamp: Timestamp when the event occurred.
Body: Loglines, i.e. the content of the log messages.
Attributes: A map for structured information of the log record.
TraceId: Request trace id as defined in W3C Trace Context. Can be set for logs that are part of request processing and have an assigned trace id. This field is optional.
SpanId: Trace flag as defined in the W3C Trace Context specification. At the time of writing the specification defines one flag, the SAMPLED flag. This field is optional.
SeverityText: String representation of the severity. This field is optional.
SeverityNumber: Numeric value of the severity: TRACE (1-4), DEBUG (5-8), INFO (9-12), WARN (13-16), ERROR (17-20), FATAL (21-24). This field is optional.
Resource: Description of the source of the log.
InstrumentationScope: Multiple occurrences of events coming from the same scope can happen across time, and they all have the same value of InstrumentationScope.

DataLoader is a class that implements functions to load data from sources. In the current version we implement FileDataLoader to load data from local files, e.g. .log, .csv, .tsv and .json files. The associated DataLoaderConfig class defines the configuration of how data will be loaded. The load_data() method loads data from the target source and returns a LogRecordObject. In future versions we will support data loaders with connectors that consume data directly from log platforms such as Splunk, Datadog, AWS CloudWatch, etc.
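As a rough usage sketch, loading a local log file and obtaining the unified record could look like the following. Only FileDataLoader, DataLoaderConfig, load_data() and the LogRecordObject fields of Table 1 are taken from the description above; the import path, the constructor arguments and the attribute access are assumptions about how such an interface would typically be exposed.

    # Hedged sketch: the import path and the DataLoaderConfig arguments are
    # assumptions; FileDataLoader, DataLoaderConfig and load_data() are the
    # names used in the text above.
    from logai.dataloader.data_loader import DataLoaderConfig, FileDataLoader

    config = DataLoaderConfig(
        filepath="my_service.csv",        # a local .log/.csv/.tsv/.json file
        log_type="csv",
        dimensions={                      # map raw columns onto the unified
            "timestamp": ["Date"],        # LogRecordObject fields of Table 1
            "body": ["Content"],
            "attributes": ["Level"],
        },
    )

    loader = FileDataLoader(config)
    log_record = loader.load_data()       # returns a LogRecordObject

    # The unified data model then exposes the OpenTelemetry-style fields,
    # e.g. the free-text body and the structured attributes.
    print(log_record.body)
    print(log_record.attributes)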
4.1.2 Preprocessing Layer

Preprocessing. Preprocessor is a class to conduct logline-level preprocessing. Users can initialize a preprocessor instance with a configuration and execute the .clean_log() method to obtain cleaned loglines. The supported configuration includes custom_delimiters_regex to parse logs with custom delimiters, and custom_replace_list to identify and replace the substrings that match the regex patterns in this list; examples are shown in Figure 3, and a code-level sketch is given at the end of this subsection.

Figure 3: Example of preprocessor execution (raw loglines with "|"-delimited fields and Step_* identifiers are cleaned via custom_delimiters_regex and custom_replace_list, producing clean loglines and a list of extracted parameter values).

Partitioning. Partitioner is a class that helps with partitioning the logs. As part of preprocessing, there are needs to shuffle, concatenate and sequentialize raw logs into different forms, for example using time-based partitions, identifier-based partitions or sliding-window partitions of fixed length. This class provides optional functions for this type of processing.
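The Figure 3 example can be approximated by the sketch below. PreprocessorConfig, custom_delimiters_regex, custom_replace_list and clean_log() are the names described above; the import path, the pandas input type and the exact return values are assumptions.

    # Hedged sketch of logline-level preprocessing, mirroring Figure 3.
    import pandas as pd
    from logai.preprocess.preprocessor import Preprocessor, PreprocessorConfig

    # Raw loglines abbreviated from Figure 3: "|"-delimited fields with
    # Step_* identifiers embedded in the message.
    raw_loglines = pd.Series([
        "20171223-22:15:29:615|Step_LSC|30002312|...",
        "20171223-22:15:29:633|Step_StandReportReceiver|30002312|...",
        "20171223-22:15:29:635|Step_StandStepCounter|30002312|...",
    ])

    config = PreprocessorConfig(
        custom_delimiters_regex=[r"\|"],           # treat "|" as a field delimiter
        custom_replace_list=[(r"Step_\w+", "")],   # strip Step_* identifiers; their
    )                                              # values appear in the parameter list

    preprocessor = Preprocessor(config)
    # Assumed to return the cleaned loglines plus the extracted terms, as in Figure 3.
    clean_loglines, extracted_terms = preprocessor.clean_log(raw_loglines)
    print(clean_loglines)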
4.1.3 Information Extraction Layer

The information extraction layer contains modules that convert log records into vectors which can be used as input to machine learning models for the actual analytical tasks. Current log analysis research and applications indicate that three main input data types are used in ML approaches: 1) converting log records into counter vectors for time-series ML techniques, 2) converting log records into feature vectors for tabular ML techniques, and 3) converting log records into sequences for sequential ML techniques. LogAI implements four components in the information extraction layer to extract information from the log records and convert logs into these target formats. The log parser component implements a series of automatic parsing algorithms in order to extract templates from the input loglines. The log vectorizer implements a bag of vectorization algorithms to convert free-form log text into numerical representations for each logline. The categorical encoder implements algorithms that encode categorical attributes into numerical representations for each logline. Last but not least, the feature extractor implements methods to group the logline-level representation vectors into log-event-level representations.

Automated Log Parsing. LogParser is a class that conducts automated log parsing tasks. Currently LogAI covers three automated log parsing algorithms: DRAIN [7], IPLoM [8] and AEL [9]. LogParser takes the unstructured logline text as input and generates two sets of results: parsed_logline, the static pattern shared by all logs in a category, and parameter_list, the list of values for each "*" position in the log pattern for the same set of loglines.
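A corresponding sketch of the parsing step follows. LogParser, the DRAIN algorithm and the parsed_logline / parameter_list outputs are named above, while the import path, the configuration class and the parse() method name are assumptions.

    # Hedged sketch of automated log parsing with DRAIN.
    import pandas as pd
    from logai.information_extraction.log_parser import LogParser, LogParserConfig

    loglines = pd.Series([
        "Received block blk_1 of size 67108864 from 10.251.91.84",
        "Received block blk_2 of size 67108864 from 10.251.203.80",
    ])

    parser = LogParser(LogParserConfig(parsing_algorithm="drain"))
    parsed = parser.parse(loglines)

    # Each logline maps to a static template (parsed_logline) plus the dynamic
    # values that fill its "*" positions (parameter_list), e.g.
    #   "Received block * of size * from *"  ->  ["blk_1", "67108864", "10.251.91.84"]
    print(parsed["parsed_logline"])
    print(parsed["parameter_list"])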
Log Vectorization. LogVectorizer is a class that converts unstructured loglines into semantic vectors. Each semantic vector is an array of numeric values that represents the logline text. LogVectorizer supports popular text vectorization algorithms such as TF-IDF [10], FastText [11], Word2Vec [12], etc.

Categorical Encoding. CategoricalEncoder is a class that encodes log attributes, the structured portion of logs. String-type attributes are transformed into categorical representations. CategoricalEncoder supports popular categorical encoding algorithms such as label encoding, one-hot encoding, ordinal encoding, etc.

Feature Extraction. FeatureExtractor is a class that conducts the final transformation of raw log data into a log feature set that machine learning models can consume. In LogAI, we primarily cover three types of log features: 1) time-series counters, 2) semantic feature sets and 3) sequence vectors. Time-series counters are used to feed time-series models such as ETS and ARIMA. Semantic feature sets can be widely used in a variety of machine learning and deep learning models. Sequence vectors are a specific type of feature format required by sequence-modeling based deep learning methods, for example Recurrent Neural Networks or Convolutional Neural Networks.
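To illustrate concretely what these representations look like, the snippet below builds them directly with pandas and scikit-learn rather than through LogAI's own vectorizer, encoder and feature-extractor classes, so it should be read as a stand-in for what those components produce, not as their API.

    # Plain pandas / scikit-learn illustration of the representations discussed
    # above; LogAI's LogVectorizer / CategoricalEncoder / FeatureExtractor wrap
    # this kind of logic behind a unified interface.
    import pandas as pd
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.preprocessing import OrdinalEncoder

    loglines = pd.Series([
        "Receiving block blk_1 src: /10.0.0.1",
        "Receiving block blk_2 src: /10.0.0.2",
        "PacketResponder for block blk_1 terminating",
    ])
    attributes = pd.DataFrame({"Level": ["INFO", "INFO", "WARN"]})
    timestamps = pd.to_datetime([
        "2008-11-09 20:35:01", "2008-11-09 20:35:20", "2008-11-09 20:36:05"])

    # Semantic feature set from the unstructured body (TF-IDF here; Word2Vec or
    # FastText embeddings would play the same role).
    semantic_features = TfidfVectorizer().fit_transform(loglines)

    # Categorical encoding of the structured attributes.
    encoded_attributes = OrdinalEncoder().fit_transform(attributes)

    # Time-series counters: number of log events per time bucket.
    counters = pd.Series(1, index=timestamps).resample("1min").sum()

    # Sequence vectors: per-identifier sequences of event ids, where each
    # logline is represented by the id of its parsed template.
    events = pd.DataFrame({"block": ["blk_1", "blk_2", "blk_1"],
                           "event_id": ["E1", "E1", "E2"]})
    sequences = events.groupby("block")["event_id"].apply(list)

    print(semantic_features.shape, encoded_attributes.ravel(), counters.tolist(), sequences.to_dict())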
4.1.4 Analysis Layer

The analysis layer contains modules that conduct the analysis tasks, including but not limited to a semantic anomaly detector, a time-series anomaly detector, a sequence anomaly detector, clustering, etc. Each analysis module provides a unified interface for multiple underlying algorithms.

Anomaly Detection. AnomalyDetector is a class that conducts anomaly detection analysis to find abnormal logs from a semantic perspective. AnomalyDetector takes the log features of the given logs as input. The output is the anomaly scores.
LogAI supports two different types of anomaly detection: 1) anomaly detection based on log counter vectors and 2) anomaly detection based on log semantic representations. The supported anomaly detection algorithms include univariate and multivariate time-series analysis algorithms from Merlion [13], and unsupervised outlier detection models like one-class SVM [14] and the local outlier factor (LOF) [15] from scikit-learn [16].

Deep-learning based anomaly detection. The NNAnomalyDetector class supports deep-learning model based log anomaly detection algorithms, most of which take log sequence vectors as input. LogAI integrates some of the popular deep learning based algorithms, like the recurrent neural network (RNN) based model LSTM [17], convolutional neural networks (CNN), Transformers [18] and the pretrained Transformer-based language model BERT [19]. The output is an anomaly score for each log sequence.

Clustering. Clustering is a class to conduct log clustering analysis tasks. The input for log clustering is the semantic log features. Clustering integrates different clustering models, such as k-Means [20], DBSCAN [21], etc. The output is a map between each log feature record and a cluster label.
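As a minimal, self-contained illustration of the two analysis modules just described, the snippet below uses scikit-learn estimators directly, i.e. the kind of models that LogAI's Clustering and AnomalyDetector classes wrap, rather than LogAI's own API.

    # Minimal illustration of log clustering and semantic outlier detection on
    # TF-IDF log features, using scikit-learn estimators of the kind LogAI wraps.
    import pandas as pd
    from sklearn.cluster import KMeans
    from sklearn.ensemble import IsolationForest
    from sklearn.feature_extraction.text import TfidfVectorizer

    loglines = pd.Series([
        "Connection established to node-1",
        "Connection established to node-2",
        "Connection closed by node-1",
        "Connection closed by node-2",
        "FATAL kernel panic: unable to mount root fs",   # the odd one out
    ])
    features = TfidfVectorizer().fit_transform(loglines).toarray()

    # Clustering: map each log feature record to a cluster label.
    labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(features)

    # Outlier detection: lower scores indicate more anomalous loglines.
    scores = IsolationForest(random_state=0).fit(features).score_samples(features)

    for line, label, score in zip(loglines, labels, scores):
        print(f"cluster={label}  score={score:+.3f}  {line}")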
4.1.5 E2E Applications

Building on the component modules from the data layer, preprocessing layer, feature extraction layer and analysis layer, LogAI provides the flexibility to build end-to-end log analysis applications, and these applications follow the design principles illustrated in Figure 4. LogAI is launched with several out-of-the-box applications.

Figure 4: Design Principles of E2E Applications (an application workflow chains a data loader, preprocessing and partitioning, information-extraction components and an analysis component, each instantiated from its own configuration block within the overall workflow configuration).

Log Summarization. It is very important to understand your logs before using them for downstream tasks. Log summarization leverages machine learning to process, aggregate and summarize logs. Please refer to the GUI module in Section 4.2 for more detail about how to use it.

Log Clustering. Log clustering can be used to categorize logs. Finding meaningful clusters can bring benefits in a variety of use cases like anomaly detection, log storage, query, etc. Please refer to the GUI module in Section 4.2 for more detail about how to use it.

Log Anomaly Detection. Log anomaly detection is an application that detects anomalous loglines. In LogAI, log anomaly detection can detect both time-series anomalies and semantic anomalies. Please refer to the GUI module in Section 4.2 for more detail about how to use it.
Figure 5: LogAI GUI portal

4.2 GUI Module

The GUI module is implemented to provide a web portal for the out-of-the-box log analysis applications, including log summarization, log clustering and log anomaly detection. Figure 5 shows the log summarization page of the LogAI portal. The portal is developed using the Plotly Dash framework.

Control Panel. The control panel is on the left side of the page. In the control panel, users can upload files and configure file and algorithm settings. When the user clicks the "Run" button, the analysis execution is triggered. This behavior is uniform across all three applications. After the analysis execution is completed, the results are displayed on the right side of the page.

Main Display Panel. On the right side of the page we display the analysis results. Different applications may have different layouts. The portal supports interactive visualization: users can click or hover on parts of the charts to drill down and get more detailed information. The interaction between the frontend and backend of different applications is designed to be unified. The control panel collects user input, generates the configuration for the application and sends it to the backend.
The backend consumes the configuration to create component instances and execute the workflow. After finishing the job, it sends the result table back to the frontend. The display panel for each application controls how the result table will be rendered for visualization. Users can expand the GUI portal to support customized analysis applications by following the same design pattern and reusing the existing components.

4.3 Summary of Supported ML Algorithms in LogAI

This section summarizes the machine learning algorithms supported in LogAI. LogAI provides an algorithms component that implements all supported algorithms with an algorithm factory. The component contains five algorithmic modules, notably: parsing_algo, vectorization_algo, categorical_encoding_algo, clustering_algo and anomaly_detection_algo. The algorithms component also contains an nn_model module that implements all neural network models. LogAI has defined unified algorithm interfaces for each module, so more algorithms can be implemented and integrated with LogAI in future development. The current LogAI algorithm coverage is shown in Table 2. The deep-learning models, generally being much more parameter-heavy, require more high-end compute devices like GPUs. In such cases, their LogAI implementations provide options to use different devices (CPU or GPU) or multiple GPUs seamlessly through the algorithm parameter configurations.
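The sketch below illustrates the kind of structure an algorithm factory behind unified interfaces implies: every anomaly-detection algorithm, statistical or neural, sits behind the same fit/predict surface, and device placement becomes one more entry in the algorithm's parameter configuration. The class names, the registry and the device field are illustrative assumptions meant to convey the design, not LogAI's actual implementation.

    # Illustrative sketch of an algorithm factory with a unified interface;
    # names and the "device" parameter are assumptions, not LogAI's actual API.
    from dataclasses import dataclass, field

    @dataclass
    class AlgoConfig:
        name: str
        device: str = "cpu"          # deep models could request "cuda:0", etc.
        params: dict = field(default_factory=dict)

    class AnomalyDetectionAlgo:
        """Every algorithm, statistical or neural, exposes the same surface."""
        def fit(self, features): ...
        def predict(self, features): ...

    _REGISTRY = {}

    def register(name):
        def wrap(cls):
            _REGISTRY[name] = cls
            return cls
        return wrap

    @register("one_class_svm")
    class OneClassSvmAlgo(AnomalyDetectionAlgo):
        def fit(self, features):
            return self
        def predict(self, features):
            return [0.0] * len(features)          # placeholder anomaly scores

    def build(config: AlgoConfig) -> AnomalyDetectionAlgo:
        return _REGISTRY[config.name](**config.params)   # config string -> instance

    detector = build(AlgoConfig(name="one_class_svm"))
    print(detector.fit([[0.1, 0.2]]).predict([[0.1, 0.2]]))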
Table 2: Summary of supported machine learning algorithms in LogAI

Log parser (Information Extraction): DRAIN, IPLoM, AEL
Log vectorizer (Unstructured Log Representation): Fast-text, TF-IDF, Word2Vec (semantic); BertTokenizer (sequential)
Categorical Encoder (Structured Log Representation): Label encoding, OneHot encoding, Ordinal encoding
Clustering (Analysis: Log Clustering): DBSCAN, K-means, BIRCH
Anomaly Detection (Analysis: Outlier Detection): One-class SVM, Isolation Forest, LOF, Distribution divergence
Anomaly Detection (Analysis: Time-series Anomaly Detection): ETS, Dynamic Baseline, ARIMA
NN models (Analysis: Sequential Anomaly Detection): CNN, LSTM, Transformers
NN models (Analysis: Sequential / Non-Sequential Anomaly Detection): LogBERT

5 Experiments: Benchmarking Log Anomaly Detection
In this section, we describe some of our experimental effort at building pipelines for specific log analysis tasks on publicly available log datasets. The purpose is to benchmark the performance of the LogAI library on these standard tasks against the performances reported in existing literature or by other well-known log libraries. Amongst the different log analysis tasks, log based anomaly detection is perhaps the most objective one, where domain experts like reliability and performance engineers can provide some supervision about which log sequences show anomalous behavior. Other tasks like log clustering and summarization are much more subjective in nature, while log based root cause analysis is too specific and tightly coupled with the application or environment it is deployed in. Hence, for these tasks it is often impossible to collect supervision labels for benchmarking purposes. Consequently, most of the publicly available log analysis datasets and benchmarks have focused on the anomaly detection task. While a small subset of these datasets has also been repurposed to serve log clustering and log summarization in past literature, they can at best be considered pseudo-oracle data for these tasks and are still not large-scale enough for benchmarking purposes. For this reason, in our LogAI library we focus on benchmarking only the log based anomaly detection task. Following the advances of Artificial Intelligence (AI), Machine Learning (ML) and Natural Language Processing (NLP), for the log anomaly detection task traditional statistical ML based solutions (like SVM, Isolation Forest, etc.) have gradually given way to more powerful and sophisticated neural models. Some of these newer models can leverage self-supervised learning to achieve, in unsupervised settings, anomaly detection performance comparable to older, traditional supervised models.
Additionally, the traditional ML models, having been around for quite a while, have been more extensively studied, with fairly well-reproduced benchmarks in the existing literature. Hence, in our benchmarking experiments we have focused only on the more recent neural models.

5.1 Limitations of Existing Libraries and Benchmarking Practices

Over the past decade there have been numerous works [22, 23, 24, 25, 26, 27, 28, 29, 3] reporting log anomaly detection performance on some of the standard open-sourced log datasets, as well as various efforts at open-sourcing libraries catering to the log anomaly detection task. For example, [2, 3] released libraries (Loglizer and Deep-Loglizer) for log based anomaly detection using traditional machine learning and more recent deep learning models, respectively. In their libraries they consolidated some of the benchmarking effort, bringing together the popular log anomaly detection models for a fairer comparison on a few public log datasets. However, despite this, there is still a lack of rigorous standardization and benchmarking amongst these works, especially the ones employing neural models. Below we list some of the specific limitations of the Loglizer and Deep-Loglizer libraries which necessitate a unified, generic framework for log analysis tasks:

Generic Log Data Processing Pipeline: There is a lack of libraries that provide a generic data processing pipeline that is common across different log datasets or different log anomaly detection algorithms. While Loglizer [5] and Deep-Loglizer [3] achieve this to some degree, they still require some dataset-specific preprocessing and customization which is quite open-ended. For users wishing to replicate them on their own datasets or other public datasets, there is no clear framework guiding the necessary steps and output structure of the dataset-specific preprocessing to follow.
On the other hand, the LogAI library provides a unified, generic data-processing pipeline across all public datasets and log analysis algorithms. It only requires a very minimal dataset-specific customization, with a clear template of the kind of preprocessing needed for each dataset - e.g. each dataset has its own way of specifying the fields of the LogRecordObject (governed by the OpenTelemetry data models), such as labels or identifiers of the loglines, which are either directly part of the raw log data or have to be derived based on some rules.

Catering to multiple Log Analysis Tasks: There is a lack of libraries that can cater to all kinds of log analysis tasks (including log clustering, summarization, anomaly detection etc.) under a single generic platform. Each of the existing log libraries is tailored to a specific kind of log analysis task. For example, libraries like Loglizer and Deep-Loglizer specifically focus on log based anomaly detection, log-parser on parsing log data, and Log3C caters to clustering and correlation specific analysis. On the other hand, LogAI enables all of these analysis tasks, along with others like summarization and visualization, under a unified framework.

Coverage of Log Analysis Models: The existing Loglizer library provides the more traditional machine learning algorithms for log based anomaly detection, with Deep-Loglizer being its deep-learning based counterpart, providing only neural ML models. LogAI, on the other hand, provides a generic framework encompassing most of the popular AI/ML algorithms - from traditional statistical ML models to popular neural models as well as more recent pretrained Transformer (BERT) based models.
Going ahead, our LogAI library can provide a more extended platform for integrating with upcoming and more powerful neural models as mainstream deep learning research progresses. For all of these models, LogAI provides a single unified data processing platform that is independent of the kind of downstream analysis task or model. Thus, with the LogAI library, we aim at a more intuitive and easy-to-use log analysis framework for practitioners of different areas and levels of expertise to perform log analysis, without being impeded by the technical nuances of the task.

5.2 Log based Anomaly Detection Workflow

In order to handle the complex and heterogeneous nature of log data, log based anomaly detection typically follows a multi-step pipeline. Starting with the raw log data dump or data streams, the log analysis workflow does some initial preprocessing and cleaning-up of the raw logs to make them amenable to ML models. This is typically followed by log parsing, which extracts a loose structure from the semi-structured data, and then by grouping and partitioning of the log lines into log sequences, in order to model the sequence characteristics of the data. After this, the logs or log sequences are vectorized, i.e. represented as a machine-readable vector, by first tokenizing each instance and converting each token to a d-dimensional embedding. On this vectorized version of the log data, various anomaly detection models can be applied.
The choice at each of these steps (e.g. whether to apply parsing or not, whether to partition based on sessions or sliding windows, or whether to apply clustering or not) can be guided by various factors - the nature of the application generating the log data, the model requirements, or other efficiency or performance related constraints.

i) Log Preprocessing: In LogAI, this step involves handling the formatting of timestamps, logline-identifiers and any associated labels (e.g. anomaly labels) in the raw log data to make it compatible with the OpenTelemetry data model. Additionally, it also provides customized filtering of specific regular expression patterns (like IP addresses, memory locations or file paths) that are deemed irrelevant for the actual log analysis, as sketched below.
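As a minimal illustration of this kind of pattern filtering, the following Python sketch replaces a few noisy dynamic fields with placeholder tokens. The patterns, the placeholders and the function name are illustrative assumptions, not the LogAI API.

import re

# Example patterns that are often irrelevant for downstream analysis:
# IP addresses, hexadecimal memory addresses and absolute file paths.
CUSTOM_PATTERNS = [
    (re.compile(r"(\d{1,3}\.){3}\d{1,3}(:\d+)?"), "<IP>"),
    (re.compile(r"0x[0-9a-fA-F]+"), "<HEX>"),
    (re.compile(r"(/[\w.-]+)+"), "<PATH>"),
]

def preprocess_logline(line: str) -> str:
    """Replace noisy dynamic patterns with placeholder tokens."""
    for pattern, placeholder in CUSTOM_PATTERNS:
        line = pattern.sub(placeholder, line)
    return line

print(preprocess_logline("open /var/log/app.log from 10.251.42.84 at 0x7f3a2c"))
# -> "open <PATH> from <IP> at <HEX>"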
Figure 6: Example of Log Parsing

ii) Log Parsing: To enable downstream processing, unstructured log messages first need to be parsed into a structured event template (i.e. the constant part that was actually designed by the developers) and parameters (i.e. the variable part which contains the dynamic runtime information). Figure 6 provides one such example of parsing a logline. In the LogAI library we provide three popular log parsers which use heuristic-based techniques - Drain [30], IPLoM [31] and AEL [32].
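To make the template/parameter split concrete, the toy function below hard-codes the single HDFS template from the Figure 6 example. Real parsers such as Drain, IPLoM or AEL infer such templates automatically from the data, so this is only an illustration of the output structure, not how the LogAI parsers work internally.

import re

# Hand-written rule for the one template shown in Figure 6.
TEMPLATE = re.compile(r"Received block (\S+) of size (\d+) from (\S+)")

def parse(logline: str):
    """Return the event template and the extracted runtime parameters."""
    match = TEMPLATE.search(logline)
    if match is None:
        return None
    return {
        "template": "Received block <*> of size <*> from <*>",
        "parameters": list(match.groups()),
    }

print(parse("Received block blk_3587508140051953248 of size 67108864 from /10.251.42.84"))
# {'template': 'Received block <*> of size <*> from <*>',
#  'parameters': ['blk_3587508140051953248', '67108864', '/10.251.42.84']}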
iii) Log Partitioning: After parsing, the next step is to partition the log data into groups based on some semantics, where each group represents a finite chunk of log lines or log sequences. The main purpose behind this is to decompose the original log dump, which typically consists of millions of log lines, into logical chunks, so as to enable explicit modeling on these chunks and allow the models to capture anomaly patterns over sequences of log templates, log parameter values, or both. In the literature, various log partitioning techniques have been applied [27, 33]. In LogAI we provide different schemes - Fixed or Sliding window based partitions, where the length of the window is determined by the length of the log sequence or by a period of time, and Identifier based partitions, where logs are partitioned based on some identifier (e.g. the session or process they originate from). Figure 7 illustrates these different choices of log grouping and partitioning, and a sketch of the two partitioning families follows below. A log event is eventually deemed to be anomalous or not either at the level of a log line or at the level of a log partition.

Figure 7: Different types of log partitioning
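A minimal sketch of the two partitioning families, assuming the parsed logs live in a pandas DataFrame with 'timestamp', 'session_id' and 'event_id' columns. The column names and function signatures are illustrative assumptions, not the LogAI interface.

import pandas as pd

def window_partitions(df: pd.DataFrame, window: int, stride: int):
    """Fixed (stride == window) or sliding (stride < window) partitions over the event sequence."""
    events = df.sort_values("timestamp")["event_id"].tolist()
    return [events[i:i + window]
            for i in range(0, max(len(events) - window + 1, 1), stride)]

def identifier_partitions(df: pd.DataFrame, id_col: str = "session_id"):
    """Identifier based partitions, e.g. one log sequence per session or per HDFS block id."""
    return {key: group.sort_values("timestamp")["event_id"].tolist()
            for key, group in df.groupby(id_col)}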
iv) Log Vectorization: After log partitioning, the next step is to represent each partition in a machine-readable way (e.g. a vector or a matrix) by extracting features from it. This can be done in various ways [34, 33]. In LogAI we provide the following vectorization techniques: i) sequential representation, which converts each partition to an ordered sequence of log event ids; ii) quantitative representation, which uses count vectors weighted by the term frequency and inverse document frequency information of the log events; and iii) semantic representation, which captures the linguistic meaning from the sequence of language tokens in the log events and learns a high-dimensional embedding vector for each token in the dataset. The nature of the log representation chosen has a direct consequence on which patterns of anomalies can be supported - for example, for capturing keyword based anomalies, semantic representation might be key, while for anomalies related to template counts and variable distributions, quantitative representations are possibly more appropriate. The semantic embedding vectors themselves can either be obtained using pretrained neural language models like GloVe or FastText, or pretrained Transformers like BERT, RoBERTa etc., or they can be learnt from scratch on the available training data by building a custom vocabulary and using these neural language models.
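The sequential and quantitative representations described above can be sketched as follows (illustrative only; the semantic variant would instead map each token to a pretrained or learned embedding vector):

from sklearn.feature_extraction.text import TfidfVectorizer

# Each partition is an ordered sequence of log event ids.
partitions = [
    ["E1", "E2", "E1", "E5"],
    ["E2", "E2", "E3"],
]

# i) sequential representation: the ordered list of event ids itself.
sequential = partitions

# ii) quantitative representation: a TF-IDF weighted count vector per partition.
vectorizer = TfidfVectorizer(analyzer=lambda seq: seq)  # sequences are already tokenized
quantitative = vectorizer.fit_transform(partitions).toarray()
print(quantitative.shape)  # (2, number of distinct event ids)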
v) Log Anomaly Detection Models for benchmarking: The task of log based anomaly detection is to analyze a dump of log data, consisting of a series of timestamped log lines, and identify the anomalous log lines that are potentially incident-indicating. Depending on the kind of application, log anomaly signals can either be used to detect or localize an incident or disruption that has already occurred, or be used to forecast potential future faults or failures. In the literature, log based anomaly detection models have been broadly categorized into two types - Supervised and Unsupervised - based on the kind of training framework they follow. Since our objective is to benchmark only neural models, we limit our discussion in this section to this class of models alone.

Supervised Anomaly Detection models require the anomaly label to be available at the level of each log line or each log group or partition. Furthermore, they typically assume that each of the training, development and test sets contains a mix of both anomalous and non-anomalous log data. These models use supervised losses like the cross entropy loss or squared error loss. But they can suffer due to the under-representation of the anomalous class of logs, especially if such logs occur very rarely in the training and development data. Due to their direct dependency on modeling the anomalous class explicitly, these models also lack robustness when the anomaly distribution changes.

Unsupervised Anomaly Detection models do not require any anomaly label for the log data. But the existing unsupervised models in the literature typically assume that the entire training data comprises only normal or non-anomalous logs, and they generally show a sharp decline in performance when the training data is adulterated with even a small fraction of anomalous logs. Amongst the most popular unsupervised anomaly detection models, mainly two paradigms have been followed:

Forecasting based models: These models learn the representations of the log lines through forecasting based self-supervision, i.e. by learning to predict the label of the next log line given an input context of the log sequence. For all of these models, following the Deep-Loglizer paper, the label is taken as the event id of the next log line. This category of models includes the various sequence encoding networks that have been popular in deep learning - recurrent neural network or convolutional neural network based models, or the more recent and more powerful self-attention based Transformer models. These models are typically trained with a cross-entropy loss between the true and predicted distributions, which aims to maximise the likelihood of the true label conditional on the given input sequence.

Reconstruction based models: This category includes Auto-encoder based models, which try to reconstruct a given sequence of loglines through a learnable hidden layer that learns an n-dimensional representation of each logline. The other, more recent models in this category are Transformer based models which are trained using masked language modeling principles. During training, a certain fraction of the input tokens is masked and the model learns to predict these tokens using the remaining input context, in the process learning the contextual representation of each token in a logline or a log sequence. This is the fundamental principle behind the BERT language model, with masked language modeling providing the learning objective when training on the log data in a self-supervised way.
Forecasting based Anomaly Detection: For our benchmarking with forecasting based models, we select three core deep learning models which have been the basis of some of the most popular recent neural log anomaly detection methods.

LSTM: This model corresponds to a long short-term memory (LSTM) network to encode a given log sequence. It also provides various options - i) whether to utilize unidirectional or bidirectional encoding of the tokens in a given input sequence, and ii) whether to have a learnable attention network over the input sequence, which linearly combines the hidden representations with the attention weights.

CNN: This model corresponds to a convolutional neural network (CNN) to encode a given log sequence. Different convolutional layers with different shape settings are applied on the input, followed by a 1-d max-pooling operation. The outputs from each of these are then concatenated and fed into a fully-connected layer.

Transformer: This model corresponds to a Transformer based encoder network with a multi-headed self-attention mechanism to encode a given log sequence. Since the Transformer outputs a d-dimensional representation for each token in the input log sequence, a mean-pooling operation is applied over those representations to get a fixed representation for the entire sequence.

Since the LSTM, CNN and Transformer models need a d-dimensional representation of each log, an embedding layer is first applied to the raw log input features. In case of the sequential feature representation, each log event id is embedded as a d-dimensional vector, while for the semantic feature representation, the embedding layer is initialized with pretrained embeddings (e.g. Word2Vec, FastText etc.) and embeds each log token id to a d-dimensional vector. The output of the LSTM, CNN or Transformer is a fixed d-dimensional representation of the input sequence, which is then projected down to the output space, followed by a softmax layer. For the supervised versions of these models, since the explicit label (anomalous or not) exists for each logline or log sequence, the output of the softmax layer directly predicts this label. For the forecasting based unsupervised versions, the output of the softmax layer predicts the id of the next logline succeeding the given input log sequence.
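A minimal PyTorch-style sketch of this forecasting setup for the LSTM case; the layer sizes and names are illustrative assumptions, not the configuration used in the LogAI benchmarks.

import torch
import torch.nn as nn

class NextEventLSTM(nn.Module):
    """Predicts the id of the next log event given a window of previous event ids."""
    def __init__(self, num_events: int, dim: int = 64):
        super().__init__()
        self.embed = nn.Embedding(num_events, dim)   # sequential representation: one vector per event id
        self.lstm = nn.LSTM(dim, dim, batch_first=True)
        self.head = nn.Linear(dim, num_events)       # projection to the event vocabulary

    def forward(self, event_ids):                    # event_ids: (batch, window)
        hidden, _ = self.lstm(self.embed(event_ids))
        return self.head(hidden[:, -1])              # logits over the next event id

# Trained with a cross-entropy loss between these logits and the true next event id.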
During inference, the forecasting based unsupervised models make a prediction for a given input log sequence, which is then compared against the actual log event following the input sequence. We follow a similar inference strategy as [3] and predict a test instance as anomalous if the ground truth is not one of the k (=10) most probable log events predicted by the model; a smaller k imposes more demanding requirements on the model's performance (a sketch of this rule is given below). In the literature, LSTM based models have been used by DeepLog [35], LogAnomaly [34] and LogRobust [36]. While DeepLog uses sequential representations, where each log message is represented by the index of its log event, LogAnomaly uses semantic representations. While both of these use a unidirectional LSTM in an unsupervised setting, LogRobust uses a supervised version of a bidirectional LSTM with an attention network. A CNN has been used by [37], but only in a supervised setting. A Transformer based model has been applied in LogSy [38], but they additionally use auxiliary log datasets as pseudo-anomalous data. This helps them to learn a better representation of the normal log data from the target system of interest while regularizing against overfitting. In order to ensure better reproducibility, in our benchmarking we do not use any additional log datasets, and hence in some of the supervised settings our Transformer based models suffer from overfitting issues and yield somewhat poorer results that are not directly comparable to the results obtained by [37]. Following [3], for all of these models, in both the supervised and unsupervised settings, we report F1-Scores.
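The top-k inference rule described above can be sketched as follows (reusing the NextEventLSTM from the earlier sketch; k=10 in our experiments):

import torch

def is_anomalous(model, window: torch.Tensor, next_event: int, k: int = 10) -> bool:
    """Flag the instance if the true next event is not among the k most probable predictions."""
    with torch.no_grad():
        logits = model(window.unsqueeze(0))          # window: (sequence_length,) tensor of event ids
    topk = torch.topk(logits, k, dim=-1).indices.squeeze(0)
    return next_event not in topk.tolist()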
Reconstruction based Anomaly Detection: For our benchmarking with reconstruction based models, we select the LogBERT model from the work LanoBERT [39]. Following that literature, the following preprocessing configurations are set before the BERT model is applied: i) since LogBERT is a parser-free technique, no log parsing is applied; ii) for obtaining the vectorized log representation, the preprocessed log sequences are tokenized using the WordPiece (Wu et al. 2016) model used in BERT; iii) the tokenizer is trained from scratch on each log dataset to ensure that a dataset-specific custom vocabulary can be learned. During training, the usual masked language modeling principles of BERT are followed. During inference, multiple masked versions of each test instance are generated by passing a fixed-size masking window over the token sequence, while leaving special characters unmasked. Thus a test instance of sequence length N results in roughly N/n masked instances, each having a masked n-gram of length up to n. After running inference on the masked test instances, the anomaly score is obtained as the average of the top-prediction probabilities (or log-probabilities) over the k most confident masked tokens (a sketch of this scoring is given below). Following LanoBERT, we report the AUROC (Area under ROC) metric over this anomaly score. All unsupervised models (forecasting or reconstruction based) are trained only on the normal log data. Following Deep-Loglizer, for the forecasting based models around 20% of the data is sequestered for test, and for LogBERT, following LanoBERT, around 30%. These percentages include the entire set of anomalous logs in the dataset. In LogAI, we additionally take out 10% of the training data as a development set for validation and model selection purposes.
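To make the masked-inference scoring concrete, the sketch below scores one preprocessed test logline with a masked language model. It assumes a HuggingFace-style masked-LM `model` and `tokenizer`, and it only illustrates the top-k most-confident-token variant of the scoring logic, not the exact LanoBERT/LogAI implementation; the loss, log-probability and entropy based scores are computed analogously.

import torch

def logbert_anomaly_score(model, tokenizer, logline: str, n: int = 3, k: int = 6) -> float:
    """Higher score = less confidently reconstructed = more likely anomalous."""
    ids = tokenizer(logline, return_tensors="pt")["input_ids"][0]
    top_probs = []
    for start in range(1, max(len(ids) - n, 2), n):      # slide an n-gram mask, skipping [CLS]/[SEP]
        masked = ids.clone()
        masked[start:start + n] = tokenizer.mask_token_id
        with torch.no_grad():
            logits = model(masked.unsqueeze(0)).logits[0]
        probs = torch.softmax(logits[start:start + n], dim=-1)
        top_probs.extend(probs.max(dim=-1).values.tolist())  # confidence of each masked position
    top_probs.sort(reverse=True)                          # most confident masked predictions first
    kept = top_probs[:k] if top_probs else [0.0]
    return 1.0 - sum(kept) / len(kept)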
5.3 Datasets

Following Deep-Loglizer and LanoBERT, we perform our benchmarking experiments on two of the most popular public log anomaly detection datasets - HDFS and BGL. Additionally, for LogBERT we also benchmark on the public Thunderbird dataset. Further, similar to Deep-Loglizer, for the BGL dataset we also perform fixed-window based log partitioning by grouping loglines over every 6-hourly window. For the LogBERT model, however, following LanoBERT, we treat each individual logline as a train or test instance, without doing any log partitioning. On the other hand, for the HDFS dataset, since anomaly labels are available only at the level of each session id (also known as BLOCK in the raw dataset), we use identifier based log partitioning, constructing one log sequence per session id. These resulting log partitions are treated as the training or test instances for all algorithms.

5.4 Experimental Settings and Results

For our benchmarking we conduct experiments on the above choice of anomaly detection algorithms under various settings and compare our experimental results with those published in the Deep-Loglizer [3] and LanoBERT [39] papers. In Table 3 we list the performance of the different supervised and unsupervised forecasting-based models (LSTM, CNN and Transformer), while Table 4 shows the results using the unsupervised reconstruction-based LogBERT model.

Evaluation Metrics: In order to compare the performances, for all supervised and unsupervised forecasting-based models we use the F1-Score as the metric, following the Deep-Loglizer paper, whereas for LogBERT, following the LanoBERT paper, we report the AUROC metric.
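Both metrics are computed in the standard way, for instance with scikit-learn (illustrative only, with toy labels and scores):

from sklearn.metrics import f1_score, roc_auc_score

y_true = [0, 0, 1, 1, 0]            # ground-truth anomaly labels of the test instances
y_pred = [0, 1, 1, 1, 0]            # hard decisions from a forecasting-based detector
scores = [0.1, 0.4, 0.9, 0.7, 0.2]  # continuous anomaly scores from LogBERT-style inference

print(f1_score(y_true, y_pred))         # threshold-dependent metric
print(roc_auc_score(y_true, scores))    # threshold-free metric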
The LanoBERT paper also provides F1-Scores, but the F1-Score calculation requires fixing a threshold, which is challenging to do over training data that contains only normal logs. According to the paper, their reported scores are the best F1 values, calculated using the threshold that yields the best performance on the test dataset. This is not a fair metric, as it involves label-knowledge of the blind test set, and hence we only compare using the AUROC metric.

Configuration Settings for Evaluation: For each of the LSTM and Transformer models, we benchmark 8 different configuration settings per dataset, based on the kind of supervision (supervised or unsupervised), whether log parsing is applied or not, and whether the log representation is sequential or semantics based. For the CNN models, we found that the semantics based log representation results in a very slow convergence rate, hence we have benchmarked our results using only the sequential feature representation of the logs. On the other hand, Deep-Loglizer showcases only specific settings for these models - e.g. forecasting based unsupervised anomaly detection is done using a unidirectional LSTM with no attention and a Transformer network, while the supervised models are a bidirectional LSTM with attention and a CNN network - whereas all of these methods can be applied in both supervised and unsupervised settings. Each of their models uses the log parsing step and has two variants that use sequential and semantic feature representations of the logs. In total, the Deep-Loglizer paper [3] provides only 8 configurations for each dataset, whereas LogAI is benchmarked on a more exhaustive set of 20 configurations per dataset.

Performance Comparison: In most of these configurations the performance achieved by LogAI is comparable to that of Deep-Loglizer.
Model       | Details                              | Supervision  | Parsing | Representation | HDFS (LogAI) | HDFS (Deep-Loglizer) | BGL (LogAI) | BGL (Deep-Loglizer)
------------+--------------------------------------+--------------+---------+----------------+--------------+----------------------+-------------+--------------------
LSTM        | Unidirectional, No Attention         | Unsupervised | yes     | sequential     | 0.981        | 0.944                | 0.938       | 0.961
LSTM        | Unidirectional, No Attention         | Unsupervised | yes     | semantic       | 0.981        | 0.945                | 0.924       | 0.967
LSTM        | Unidirectional, No Attention         | Unsupervised | no      | sequential     | 0.979        | -                    | 0.925       | -
LSTM        | Unidirectional, No Attention         | Unsupervised | no      | semantic       | 0.981        | -                    | 0.924       | -
LSTM        | Bidirectional, With Attention        | Supervised   | yes     | sequential     | 0.984        | 0.96                 | 0.983       | 0.983
LSTM        | Bidirectional, With Attention        | Supervised   | yes     | semantic       | 0.964        | 0.964                | 0.95        | 0.983
LSTM        | Bidirectional, With Attention        | Supervised   | no      | sequential     | 0.989        | -                    | 0.931       | -
LSTM        | Bidirectional, With Attention        | Supervised   | no      | semantic       | 0.971        | -                    | 0.983       | -
CNN         | 2-D Convolution with 1-D Max pooling | Unsupervised | yes     | sequential     | 0.981        | -                    | 0.929       | -
CNN         | 2-D Convolution with 1-D Max pooling | Unsupervised | no      | sequential     | 0.981        | -                    | 0.922       | -
CNN         | 2-D Convolution with 1-D Max pooling | Supervised   | yes     | sequential     | 0.943        | 0.97                 | 0.983       | 0.972
CNN         | 2-D Convolution with 1-D Max pooling | Supervised   | no      | sequential     | 0.946        | -                    | 0.990       | -
Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | yes | sequential | 0.971 | 0.905 | 0.933 | 0.956
Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | yes | semantic   | 0.978 | 0.925 | 0.921 | 0.957
Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | no  | sequential | 0.98  | -     | 0.92  | -
Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | no  | semantic   | 0.975 | -     | 0.917 | -
Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | yes | sequential | 0.934 | -     | 0.986 | -
Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | yes | semantic   | 0.784 | -     | 0.963 | -
Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | no  | sequential | 0.945 | -     | 0.994 | -
Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | no  | semantic   | 0.915 | -     | 0.977 | -
Table 3: Comparison between different supervised and unsupervised forecasting-based neural anomaly detection models in LogAI and in the Deep-Loglizer library [3], using F1-Score as the performance metric. In the Parsing column, "yes"/"no" indicate whether the log parsing step was applied. The dashed (-) cells indicate that there are no reported numbers in the Deep-Loglizer paper corresponding to those configurations.

The 2-3% difference in performance between the two libraries is not statistically significant and can mostly be attributed to the following factors. First, following the implementation open-sourced by the authors of Deep-Loglizer at https://github.com/logpai/deep-loglizer, it is evident that their library does not utilize any development (or validation) set and directly performs model selection based on the test performance. LogAI, on the other hand, selects the model checkpoint based on the validation performance and reports results on the blind test set. Secondly, for the same reason, the resulting training and test splits used by LogAI and Deep-Loglizer are not identical. Especially for the BGL data, the performance difference is somewhat more observable, since both libraries apply fixed time-partitions of 6 hours and report the evaluation at the level of the partitions, instead of at the logline level. This also adds to the possibility of more significant differences in the training/test data setup between the two libraries. For Transformer based models, especially in the supervised setting, we observe a reduced performance.
A similar effect had been studied in the original work [38] that used a Transformer model as a log anomaly detector in the supervised setting. Their model suffered from overfitting on the target system's log data due to the presence of only rare and sparse anomaly patterns in the training data. To overcome the overfitting issue, they additionally involve other external systems' logs as auxiliary data, treating them as pseudo "anomalous" logs. But in order to keep our benchmarking reproducible, we do not use any additional auxiliary data and subsequently report a poorer performance. The Deep-Loglizer paper also benchmarks only the unsupervised setting of the Transformer model, which is much less prone to overfitting.

For the LogBERT model, we benchmark the test results under various inferencing strategies. Given a test instance, which has been converted to multiple masked versions (each having a contiguous n-gram masked), we average the inference score either over all the masked tokens or over the top-6 most confident ones, based on the model prediction likelihood. For the latter we consider different inference scores - the mean predictive loss, the maximum predictive probability, the maximum predictive log-probability, or the entropy of the prediction distribution. All of these metrics are quite correlated, and our objective is simply to show that our LogBERT implementation yields reasonably stable results across these different inferencing strategies. While LanoBERT also uses predictive loss and probability based scores, they provide the AUROC evaluation metric only for the latter, and they also evaluate only the HDFS and BGL datasets. For the predictive probability based inference strategy, the results obtained by LogAI and LanoBERT are quite comparable, with small differences owing to the variability of the train/test splits used in the two implementations (the authors of LanoBERT have used their own train/test split due to the general lack of standardized data splits for these datasets).
Inference Strategy                               | Score                   | HDFS (LogAI) | HDFS (LanoBERT) | BGL (LogAI) | BGL (LanoBERT) | Thunderbird (LogAI) | Thunderbird (LanoBERT)
-------------------------------------------------+-------------------------+--------------+-----------------+-------------+----------------+---------------------+-----------------------
Averaged over all masked tokens                  | Mean Predictive Loss    | 0.983        | -               | 0.998       | -              | 0.953               | -
Averaged over top-6 most-confident masked tokens | Mean Predictive Loss    | 0.98         | -               | 0.964       | -              | 0.937               | -
Averaged over top-6 most-confident masked tokens | Max Predictive Prob.    | 0.976        | 0.99            | 0.972       | 0.972          | 0.953               | -
Averaged over top-6 most-confident masked tokens | Max Predictive LogProb. | 0.976        | -               | 0.969       | -              | 0.917               | -
Averaged over top-6 most-confident masked tokens | Mean Predictive Entropy | 0.976        | -               | 0.973       | -              | 0.967               | -

Table 4: Comparison of the LogBERT model performance achieved by our LogAI library and by LanoBERT [39], using the AUROC metric.
Both versions of the model are in the unsupervised setting (trained on normal logs only) and do not need any log parsing. The dashed (-) cells indicate that there are no reported numbers in the LanoBERT paper for those configurations.

Overall, our experiments on this suite of deep-learning-based log anomaly detection models suggest that their implementations in the LogAI library are able to reproduce the established performance benchmarks on standard open-source datasets with reasonable accuracy. Additionally, owing to a more generic data processing pipeline, we are able to seamlessly extend to a more exhaustive set of experimental settings than has been explored or implemented before in the existing literature and libraries.

6 Conclusion

In this technical report we introduced LogAI, an open-source library for AI-based log analytics and intelligence. The LogAI library uses the same unified log data model as OpenTelemetry to ensure that its analytical processes are agnostic to any log platform that supports OpenTelemetry. LogAI also abstracts the common processes of different downstream tasks and provides reusable components to execute them. In addition, LogAI provides a large variety of AI capabilities, from time-series analysis and traditional statistical learning to deep learning and pre-trained transformer models. We showed how LogAI can be used to conduct a variety of common log analysis tasks such as log summarization, clustering, and anomaly detection, and we also provided extensive benchmarking results on log anomaly detection. LogAI version v0.1.0 is released as open-source code under the BSD-3-Clause license. Our team will provide continuous support and further improvements in future versions.
Acknowledgments

We would like to thank a number of leaders and colleagues from Salesforce.com Inc. who have provided strong support, advice, and contributions to this open-source project.

References

[1] The Business Research Company. Log Management Global Market Report. 2023.
[2] Shilin He, Jieming Zhu, Pinjia He, and Michael R. Lyu. Experience report: System log analysis for anomaly detection. In 2016 IEEE 27th International Symposium on Software Reliability Engineering (ISSRE), pages 207-218, 2016.
[3] Zhuangbin Chen, Jinyang Liu, Wenwei Gu, Yuxin Su, and Michael R. Lyu. Experience report: Deep learning-based system log analysis for anomaly detection. CoRR, abs/2107.05908, 2021.
[4] Jiang Zhaoxue, Li Tong, Zhang Zhenguo, Ge Jingguo, You Junling, and Li Liangxiong. A survey on log research of AIOps: Methods and trends. Mobile Networks and Applications, pages 1-12, 2022.
[5] Shilin He, Jieming Zhu, Pinjia He, and Michael R. Lyu. Experience report: System log analysis for anomaly detection. In 27th IEEE International Symposium on Software Reliability Engineering, ISSRE 2016, Ottawa, ON, Canada, October 23-27, 2016, pages 207-218. IEEE Computer Society, 2016.
[6] Zhuangbin Chen, Jinyang Liu, Wenwei Gu, Yuxin Su, and Michael R. Lyu. Experience report: Deep learning-based system log analysis for anomaly detection, 2021.
[7] Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R. Lyu. Drain: An online log parsing approach with fixed depth tree. In 2017 IEEE International Conference on Web Services (ICWS), pages 33-40. IEEE, 2017.
[8] Adetokunbo A. O. Makanju, A. Nur Zincir-Heywood, and Evangelos E. Milios. Clustering event logs using iterative partitioning. In Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 1255-1264, 2009.
[9] Zhen Ming Jiang, Ahmed E. Hassan, Gilbert Hamann, and Parminder Flora. An automated approach for abstracting execution logs to execution events. Journal of Software Maintenance and Evolution: Research and Practice, 20(4):249-267, 2008.
[10] Juan Ramos et al. Using TF-IDF to determine word relevance in document queries. In Proceedings of the First Instructional Conference on Machine Learning, volume 242, pages 29-48. Citeseer, 2003.
[11] Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. Enriching word vectors with subword information. Transactions of the Association for Computational Linguistics, 5:135-146, 2017.
[12] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781, 2013.
[13] Aadyot Bhatnagar, Paul Kassianik, Chenghao Liu, Tian Lan, Wenzhuo Yang, Rowan Cassius, Doyen Sahoo, Devansh Arpit, Sri Subramanian, Gerald Woo, Amrita Saha, Arun Kumar Jagota, Gokulakrishnan Gopalakrishnan, Manpreet Singh, K C Krithika, Sukumar Maddineni, Daeki Cho, Bo Zong, Yingbo Zhou, Caiming Xiong, Silvio Savarese, Steven Hoi, and Huan Wang. Merlion: A machine learning library for time series. 2021.
[14] Bernhard Schölkopf, John C. Platt, John C. Shawe-Taylor, Alex J. Smola, and Robert C. Williamson. Estimating the support of a high-dimensional distribution. Neural Computation, 13(7):1443-1471, July 2001.
[15] Markus M. Breunig, Hans-Peter Kriegel, Raymond T. Ng, and Jörg Sander. LOF: Identifying density-based local outliers. In Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data, SIGMOD '00, pages 93-104, New York, NY, USA, 2000. Association for Computing Machinery.
[16] F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011.
[17] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9(8):1735-1780, 1997.
[18] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS'17, pages 6000-6010, Red Hook, NY, USA, 2017. Curran Associates Inc.
[19] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4171-4186. Association for Computational Linguistics, 2019.
[20] D. Sculley. Web-scale k-means clustering. In Proceedings of the 19th International Conference on World Wide Web, WWW '10, pages 1177-1178, New York, NY, USA, 2010. Association for Computing Machinery.
[21] Erich Schubert, Jörg Sander, Martin Ester, Hans Peter Kriegel, and Xiaowei Xu. DBSCAN revisited, revisited: Why and how you should (still) use DBSCAN. ACM Transactions on Database Systems, 42(3), July 2017.
[22] Jiang Zhaoxue, Li Tong, Zhang Zhenguo, Ge Jingguo, You Junling, and Li Liangxiong. A survey on log research of AIOps: Methods and trends. Mobile Networks and Applications, 26(6):2353-2364, December 2021.
[23] Shilin He, Pinjia He, Zhuangbin Chen, Tianyi Yang, Yuxin Su, and Michael R. Lyu. A survey on automated log analysis for reliability engineering. ACM Computing Surveys, 54(6), July 2021.
[24] Paolo Notaro, Jorge Cardoso, and Michael Gerndt. A survey of AIOps methods for failure management. ACM Transactions on Intelligent Systems and Technology, 12(6), November 2021.
[25] Xiao Han and Shuhan Yuan. Unsupervised cross-system log anomaly detection via domain adaptation. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, CIKM '21, pages 3068-3072, New York, NY, USA, 2021. Association for Computing Machinery.
[26] Van-Hoang Le and Hongyu Zhang. Log-based anomaly detection with deep learning: How far are we? In Proceedings of the 44th International Conference on Software Engineering, ICSE '22, pages 1356-1367, New York, NY, USA, 2022. Association for Computing Machinery.
[27] Nengwen Zhao, Honglin Wang, Zeyan Li, Xiao Peng, Gang Wang, Zhu Pan, Yong Wu, Zhen Feng, Xidao Wen, Wenchi Zhang, Kaixin Sui, and Dan Pei. An empirical investigation of practical log anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering, ESEC/FSE 2021, pages 1404-1415, New York, NY, USA, 2021. Association for Computing Machinery.
[28] Yichen Zhu, Weibin Meng, Ying Liu, Shenglin Zhang, Tao Han, Shimin Tao, and Dan Pei. UniLog: Deploy one model and specialize it for all log analysis tasks. CoRR, abs/2112.03159, 2021.
[29] Jacopo Soldani and Antonio Brogi. Anomaly detection and failure root cause analysis in (micro) service-based cloud applications: A survey. ACM Computing Surveys, 55(3), February 2022.
[30] Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R. Lyu. Drain: An online log parsing approach with fixed depth tree. In 2017 IEEE International Conference on Web Services (ICWS), pages 33-40, 2017.
[31] Adetokunbo A. O. Makanju, A. Nur Zincir-Heywood, and Evangelos E. Milios. Clustering event logs using iterative partitioning. In Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '09, pages 1255-1264, New York, NY, USA, 2009. Association for Computing Machinery.
[32] Zhen Ming Jiang, Ahmed E. Hassan, Parminder Flora, and Gilbert Hamann. Abstracting execution logs to execution events for enterprise applications (short paper). In 2008 The Eighth International Conference on Quality Software, pages 181-186, 2008.
[33] Mostafa Farshchi, Jean-Guy Schneider, Ingo Weber, and John Grundy. Experience report: Anomaly detection of cloud application operations using log and cloud metric correlation analysis. In 2015 IEEE 26th International Symposium on Software Reliability Engineering (ISSRE), pages 24-34, 2015.
[34] Weibin Meng, Ying Liu, Yichen Zhu, Shenglin Zhang, Dan Pei, Yuqing Liu, Yihao Chen, Ruizhi Zhang, Shimin Tao, Pei Sun, and Rong Zhou. LogAnomaly: Unsupervised detection of sequential and quantitative anomalies in unstructured logs. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, IJCAI'19, pages 4739-4745. AAAI Press, 2019.
[35] Min Du, Feifei Li, Guineng Zheng, and Vivek Srikumar. DeepLog: Anomaly detection and diagnosis from system logs through deep learning. In Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security, CCS '17, pages 1285-1298, New York, NY, USA, 2017. Association for Computing Machinery.
[36] Xu Zhang, Yong Xu, Qingwei Lin, Bo Qiao, Hongyu Zhang, Yingnong Dang, Chunyu Xie, Xinsheng Yang, Qian Cheng, Ze Li, Junjie Chen, Xiaoting He, Randolph Yao, Jian-Guang Lou, Murali Chintalapati, Furao Shen, and Dongmei Zhang. Robust log-based anomaly detection on unstable log data. In Proceedings of the 2019 27th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering, ESEC/FSE 2019, pages 807-817, New York, NY, USA, 2019. Association for Computing Machinery.
[37] Siyang Lu, Xiang Wei, Yandong Li, and Liqiang Wang. Detecting anomaly in big data system logs using convolutional neural network. In 2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress (DASC/PiCom/DataCom/CyberSciTech 2018), Athens, Greece, August 12-15, 2018, pages 151-158. IEEE Computer Society, 2018.
[38] Sasho Nedelkoski, Jasmin Bogatinovski, Alexander Acker, Jorge Cardoso, and Odej Kao. Self-attentive classification-based anomaly detection in unstructured logs.
In 2020 IEEE International Conference on Data Mining (ICDM), pages 1196-1201, 2020.
[39] Yukyung Lee, Jina Kim, and Pilsung Kang. LAnoBERT: System log anomaly detection based on BERT masked language model. CoRR, abs/2111.09564, 2021.

diff --git a/1dAyT4oBgHgl3EQf1fkT/content/tmp_files/2301.00734v1.pdf.txt b/1dAyT4oBgHgl3EQf1fkT/content/tmp_files/2301.00734v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f463bb0085dab1ca237547e602fe0e6744cb8aaf
--- /dev/null
+++ b/1dAyT4oBgHgl3EQf1fkT/content/tmp_files/2301.00734v1.pdf.txt
@@ -0,0 +1,1547 @@

Nonlinear Non-Hermitian Landau-Zener-Stückelberg-Majorana interferometry

Xin Wang,1 H. D. Liu,1,* and L. B. Fu2,†
1 Center for Quantum Sciences and School of Physics, Northeast Normal University, Changchun 130024, China
2 Graduate School of China Academy of Engineering Physics, No. 10 Xibeiwang East Road, Haidian District, Beijing, 100193, China
* liuhd100@nenu.edu.cn
† lbfu@gscaep.ac.cn
(Dated: January 3, 2023)

In this work, we study non-Hermitian nonlinear LZSM interferometry in a non-Hermitian N-body interacting boson system in which the non-Hermiticity stems from nonreciprocal tunnelings between the bosons. Using the mean-field approximation and the projective Hilbert space, we study the effect of nonreciprocity and nonlinearity on the energy spectrum, the dynamics, and the formation of the interference fringes. We investigate the different symmetries and the impact of the two different types of reciprocity, i.e., in-phase tunneling and anti-phase tunneling, on the energy spectrum and on the phase transition between Josephson oscillation and self-trapping. For the LZSM interferometry, the strength of the nonreciprocity is found to play an essential role in the population of the projective state and in the strengths of the interference patterns in the projective space, while the conditions for destructive and constructive interference under the weak-coupling approximation still depend only on the strength of the nonlinearity. Our result provides an application of nonlinear non-Hermitian LZSM interferometry in studying the parameters of a non-Hermitian nonlinear two-level system related to the nonlinearity and the non-Hermiticity.

I. INTRODUCTION

The quantum two-level system (TLS) is the most basic building block of physical systems. Among its phenomena, the Landau-Zener (LZ) transition between two levels at an avoided crossing [1-3] has received widespread attention.
When these two-level systems are driven by a strong periodic field, a series of LZ transitions occurs and the transition probability exhibits a periodic dependence on the phase (Stückelberg phase) accumulated between transitions [1, 4]. This periodic modulation is called Landau-Zener-Stückelberg-Majorana (LZSM) interferometry [5, 6]. With the development of research, LZSM interferometry has become an important phenomenon in quantum science and technology. On the one hand, LZSM interferometry is used for ultra-fast universal quantum control of a quantum-dot charge qubit [7] and for characterizing qubit dephasing [8], etc. On the other hand, it has so far been involved in many fields, such as molecular nanomagnets [9, 10], quasi-one-dimensional layered materials [11, 12], ultracold molecules [13], quantum noise [14], Bose-Einstein condensates [15-19], and Rydberg atoms [20]. Interestingly, if a two-level system takes the nonlinear interaction into account, it may produce unexpected interference features [21-26]. For the nonlinear LZ model, a self-trapping phase transition may occur in LZSM interferometry [27-31], and there may be exceptional ring structures in the energy spectra [32, 33].

In recent years, non-Hermitian quantum systems with real energy spectra have received widespread attention in theory and experiment [34-41]. There are two kinds of non-Hermiticity: asymmetric coupling strengths in nonreciprocal systems, and gain-loss in reciprocal systems. Correspondingly, there are two kinds of non-Hermitian Hamiltonians, describing nonreciprocal systems with asymmetric coupling strengths [42-46] and gain-loss systems [37-41]. Bender and Boettcher discovered a series of parity-time (PT)-symmetric Hamiltonians [47] which can result in real energy spectra. Mostafazadeh generalized this type of Hamiltonian to an η-pseudo-Hermitian quantum theory, which explains the conditions for a non-Hermitian system to have a real energy spectrum (η is a positive Hermitian operator) [48-50]. Over more than ten years of development, the theory has been applied in many fields, such as quantum field theory [51-55], supersymmetric quantum mechanics [56, 57], non-commutative field theory [58], and quantum information [59]. In particular, there always exist exceptional points (EPs) in the real energy spectrum of a non-Hermitian system [60, 61], at which two or more eigenstates of the system coalesce. These EPs of the energy spectrum in the parameter space are closely related to the symmetry, topological properties, and phase transitions of the system [34-36]. Consequently, efforts have been put forward to extend the study of the LZ problem to non-Hermitian systems [6, 62-65]. Therefore, for non-Hermitian systems and nonlinear LZSM interference, it is natural to ask: how does the energy spectrum of the nonlinear LZ system change if non-Hermiticity emerges? Will nonlinearity affect the EPs? Moreover, since the populations of the bare states on the adiabatic eigenstates normally cannot be normalized by a time-independent coefficient [66], can the interesting self-trapping effect still be observed in the nonlinear non-Hermitian case? We shed light on these questions in this paper.
By setting up the projective Hilbert space, we show that the populations of the projective quantum states can still exhibit LZSM interferometry, and we analyze the influence of non-Hermiticity and nonlinearity on the energy spectra and the interference. Then, we discuss the influence of non-Hermiticity on the self-trapping effect. Finally, under the weak-coupling approximation for the projective quantum states, we further demonstrate the validity and accuracy of the proposed method.

The structure of the paper is as follows. In Sec. II, we introduce a non-Hermitian N-body interacting boson system which, in the mean-field approximation, is equivalent to a nonlinear nonreciprocal two-level system with periodic driving, and we discuss the energy spectrum of this two-level system. In Sec. III, the influence of the nonlinear strength and the non-Hermiticity on LZSM interferometry and on the self-trapping effect is studied; in the weak-coupling limit, the non-Hermiticity does not affect the conditions of destructive and constructive interference. Finally, the conclusions are summarized in Sec. IV.

II. NONLINEAR NON-HERMITIAN TWO-LEVEL MODEL

The second-quantized Hamiltonian of a nonreciprocal interacting-boson system is

\hat{H}_0 = \frac{\gamma}{2}(\hat{a}^\dagger\hat{a} - \hat{b}^\dagger\hat{b}) + \frac{\Delta_2}{2}\hat{a}^\dagger\hat{b} + \frac{\Delta_1}{2}\hat{a}\hat{b}^\dagger - \frac{c}{4N}(\hat{a}^\dagger\hat{a} - \hat{b}^\dagger\hat{b})^2,    (1)

where the annihilation operators \hat{a}, \hat{b} and creation operators \hat{a}^\dagger, \hat{b}^\dagger correspond to the two modes, i.e., the left and right wells of the double-well BEC system. Here γ = A sin(ωt) + ϵ0 is the monochromatic driving field with amplitude A, frequency ω, and offset ϵ0; c is the interaction strength between bosons; and ∆i (i = 1, 2) are the tunneling amplitudes. When the total number of bosons N → ∞, all particles can be assumed to occupy the same spin coherent state in the mean-field approximation [67, 68]. Since the quantum states of the non-Hermitian system live in a dual Hilbert space in order to keep the normalization condition [50], the coherent states need to be defined by both left and right states,

|\Psi^r_{sc}\rangle = \frac{1}{\sqrt{N!}}(\alpha_1\hat{a}^\dagger + \beta_1\hat{b}^\dagger)^N|\emptyset\rangle,
|\Psi^l_{sc}\rangle = \frac{1}{\sqrt{N!}}(\alpha_2\hat{a}^\dagger + \beta_2\hat{b}^\dagger)^N|\emptyset\rangle.    (2)

Based on this, we derive the semiclassical Hamiltonian (see Appendix A)

\hat{H}_M = \frac{\langle\Psi^l_{sc}|\hat{H}_0|\Psi^r_{sc}\rangle}{N} = \frac{\gamma}{2}(\alpha_1\alpha_2^* - \beta_1\beta_2^*) + \frac{\Delta_2}{2}\alpha_2^*\beta_1 + \frac{\Delta_1}{2}\alpha_1\beta_2^* - \frac{c}{4}(\beta_1\beta_2^* - \alpha_1\alpha_2^*)^2.    (3)

From the dynamical evolution generated by the semiclassical Hamiltonian [67],

i\dot{\alpha}_1 = \frac{\partial\hat{H}_M}{\partial\alpha_2^*}, \qquad i\dot{\beta}_1 = \frac{\partial\hat{H}_M}{\partial\beta_2^*},    (4)

we can construct the dimensionless Schrödinger equation

i\frac{\partial}{\partial t}\begin{pmatrix}\alpha_1\\ \beta_1\end{pmatrix} = \hat{H}_{mF}\begin{pmatrix}\alpha_1\\ \beta_1\end{pmatrix},    (5)

with the mean-field Hamiltonian

\hat{H}_{mF} = \begin{pmatrix}\frac{\gamma}{2} + \frac{c}{2}(\beta_1\beta_2^* - \alpha_1\alpha_2^*) & \frac{\Delta_1}{2}\\ \frac{\Delta_2}{2} & -\frac{\gamma}{2} - \frac{c}{2}(\beta_1\beta_2^* - \alpha_1\alpha_2^*)\end{pmatrix}    (6)

and the state |ψ^r⟩ = (α1, β1)^T.

FIG. 1. Time evolution of the energy levels for different offsets: (a) ϵ0 = 0 and (b) ϵ0 = 5, where A = 10, ω = 1 and ∆1∆2 > 0. The time-dependent adiabatic energy levels (i.e., ∆ = 1) are shown by the red (c = 0) and black (c = 3) dashed lines, while the diabatic energy levels (i.e., ∆ = 0) are shown by the blue (c = 0) and green (c = 3) solid lines.
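For readers who want to experiment with the model numerically, the mean-field Hamiltonian of Eq. (6) is straightforward to assemble as a 2x2 matrix. The helper below is a minimal sketch (function name and parameter values are illustrative, not from the paper); it also checks that for reciprocal tunneling and identical left/right amplitudes the matrix becomes Hermitian, recovering the usual nonlinear two-level model.

    import numpy as np

    def h_mf(gamma, c, d1, d2, psi_r, psi_l):
        """Mean-field Hamiltonian of Eq. (6) for given right/left amplitudes."""
        a1, b1 = psi_r
        a2, b2 = psi_l
        z = b1 * np.conj(b2) - a1 * np.conj(a2)      # population-difference term
        diag = 0.5 * gamma + 0.5 * c * z
        return np.array([[diag, 0.5 * d1],
                         [0.5 * d2, -diag]], dtype=complex)

    # Sanity check: reciprocal tunneling (d1 == d2) and identical left/right
    # amplitudes give a Hermitian matrix.
    psi = np.array([0.6, 0.8], dtype=complex)
    H = h_mf(gamma=0.3, c=1.0, d1=1.0, d2=1.0, psi_r=psi, psi_l=psi)
    print(np.allclose(H, H.conj().T))   # True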
Therefore, the model Hamiltonian under periodic driving can be described by the nonlinear nonreciprocal two-level Hamiltonian

\hat{H} = \frac{\Delta_1 + \Delta_2}{4}\hat{\sigma}_x + \frac{\Delta_1 - \Delta_2}{4}i\hat{\sigma}_y + \frac{\gamma(t) + c(\beta_1\beta_2^* - \alpha_1\alpha_2^*)}{2}\hat{\sigma}_z,    (7)

where \hat{\sigma}_{x,y,z} are the Pauli matrices and \alpha_1, \alpha_2, \beta_1, \beta_2 are the probability amplitudes. The dynamical equations of the system are [50]

i\frac{\partial}{\partial t}|\psi^r\rangle = \hat{H}|\psi^r\rangle, \qquad i\frac{\partial}{\partial t}|\psi^l\rangle = \hat{H}^\dagger|\psi^l\rangle,    (8)

where \langle\psi^l|\psi^r\rangle = 1 and the quantum states

|\psi^r\rangle = \alpha_1|\uparrow\rangle + \beta_1|\downarrow\rangle, \qquad |\psi^l\rangle = \alpha_2|\uparrow\rangle + \beta_2|\downarrow\rangle    (9)

are represented in the diabatic basis \{|\uparrow\rangle, |\downarrow\rangle\} of spin eigenstates. For the adiabatic basis, the left and right instantaneous eigenstates of the time-dependent Hamiltonian \hat{H} are given by [50]

\hat{H}|\varphi^r_n\rangle = E_n|\varphi^r_n\rangle, \qquad \hat{H}^\dagger|\varphi^l_n\rangle = E_n^*|\varphi^l_n\rangle,    (10)

where \langle\varphi^l_m|\varphi^r_n\rangle = \delta_{nm} (n = 1, 2), and the eigenenergies E_n(t) are determined by the quartic equation (see Appendix B)

E^4 + cE^3 + \frac{1}{4}(c^2 - \gamma^2 - \Delta_1\Delta_2)E^2 - \frac{c\Delta_1\Delta_2}{4}E - \frac{\Delta_1\Delta_2 c^2}{16} = 0.    (11)

By solving Eq. (11), we obtain the energy spectrum of the system (7) (see Fig. 1 and Fig. 2). The two parameters

\Delta \equiv \sqrt{|\Delta_1\Delta_2|}, \qquad k \equiv \sqrt{|\Delta_1/\Delta_2|}    (12)

are introduced to describe the mean tunneling amplitude and the nonreciprocity.

FIG. 2. Time evolution of the energy levels for different offsets: (a) ϵ0 = 0 and (b) ϵ0 = 5, where A = 10, ω = 1 and ∆1∆2 < 0. The time-dependent adiabatic energy levels (i.e., ∆ = √|∆1∆2| = 1) are shown by the red (c = 0) and black (c = 3) dashed lines, while the diabatic energy levels (i.e., ∆ = 0) are shown by the blue (c = 0) and green (c = 3) solid lines.

In the in-phase tunneling case ∆1∆2 > 0, shown in Fig. 1, the energy spectrum of the system (7) is the same as that of the Hermitian Hamiltonian \hat{H}_h = \frac{\Delta}{2}\hat{\sigma}_x + \frac{\gamma(t) + c(|\beta|^2 - |\alpha|^2)}{2}\hat{\sigma}_z. The Hamiltonian \hat{H} and the quantum state |\psi^r\rangle of the nonreciprocal system can therefore be related to the Hermitian system by

\hat{H}_h = \hat{S}\hat{H}\hat{S}^{-1}, \qquad |\psi\rangle = \hat{S}|\psi^r\rangle = \begin{pmatrix}\alpha_1\\ k\beta_1\end{pmatrix},    (13)

where \hat{S} = \mathrm{diag}(1, k). Compared with \hat{H}_h, the nonreciprocity only affects the eigenstates of the system; it neither changes the eigenvalues nor breaks the symmetry of the system. In the anti-phase tunneling case ∆1∆2 < 0, shown in Fig. 2, the adiabatic energy levels have a series of degenerate points (EPs) when c = 0 (see the crossing points of the red dashed lines in Fig. 2; the imaginary parts of E_n are not shown). Interestingly, when the nonlinearity is added (c ≠ 0), the EPs disappear and near-degenerate regions are formed (see the black dashed lines in Fig. 2). When the offset is considered (ϵ0 ≠ 0), the near-degenerate regions disappear near the times t'_n = (t_1 + t_3)/2 + 2nπ/ω (with n an integer), the period changes from nπ/ω to 2nπ/ω, and the ring energy levels tend to degenerate at times t_1 + 2mπ/ω (with m an integer) as ϵ0 increases, as shown in Fig. 2. Evidently, the nonlinearity affects the EPs: by Eq. (11), E_n = 0 is a root of the equation iff c∆1∆2 = 0, so a nonzero c does not allow EPs in the anti-phase tunneling case ∆1∆2 < 0. Next, we analyze the cases in which the energy spectrum has real roots.

FIG. 3. Different regions of the parameter space of c/∆ and γ/∆ in the anti-phase tunneling case. Region I: f(c/∆, γ/∆) < 0; Region II: γ²/∆² > 1 when f(c/∆, γ/∆) > 0; Region III: γ²/∆² < 1. Naturally, when f(c/∆, γ/∆) < 0, the inequality γ²/∆² > 1 is guaranteed.
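As a quick numerical cross-check of the adiabatic spectrum, the quartic in Eq. (11) can be solved directly for its four roots at each instant of the drive. The sketch below does this with numpy; it is an illustrative reconstruction with arbitrary placeholder parameters, not code released with the paper.

    import numpy as np

    def adiabatic_energies(t, A=10.0, omega=1.0, eps0=0.0, c=3.0, d1=1.0, d2=-1.0):
        """Roots of Eq. (11): E^4 + c E^3 + (c^2 - gamma^2 - d1*d2)/4 E^2
        - c*d1*d2/4 E - d1*d2*c^2/16 = 0, with gamma(t) = A sin(wt) + eps0."""
        gamma = A * np.sin(omega * t) + eps0
        coeffs = [1.0,
                  c,
                  0.25 * (c**2 - gamma**2 - d1 * d2),
                  -0.25 * c * d1 * d2,
                  -d1 * d2 * c**2 / 16.0]
        return np.roots(coeffs)

    # Scan a few instants of one driving period and count numerically real roots.
    for t in np.linspace(0.0, 2 * np.pi, 5):
        roots = adiabatic_energies(t)
        n_real = int(np.sum(np.abs(roots.imag) < 1e-9))
        print(f"t = {t:5.2f}  real roots: {n_real}  E = {np.round(roots, 3)}")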
For the special case c = 0, the eigenenergies of the system are ±\sqrt{\gamma^2(t) + \Delta_1\Delta_2}. It is easy to see that EPs emerge at \gamma^2(t) = -\Delta_1\Delta_2 in the anti-phase tunneling case ∆1∆2 < 0. For c ≠ 0, whether the roots of the energy equation (11) are real depends on the sign of

\delta = -c^2\gamma^2\Delta_1\Delta_2\,\xi,    (14)

with \xi = (c^2 - \gamma^2 - \Delta_1\Delta_2)^3 - 27c^2\gamma^2\Delta_1\Delta_2.

When δ > 0, there are two real roots and a pair of conjugate complex roots, so the system always has real eigenenergies. When δ < 0, the equation has four unequal real roots if c² + 2(∆1∆2 + γ²) and (∆1∆2 + γ²)(2c² + ∆1∆2 + γ²) are both positive; otherwise, it has two pairs of unequal conjugate complex roots. Clearly, in the in-phase tunneling case ∆1∆2 > 0 the system always has real eigenenergies.

For the anti-phase tunneling case with δ < 0, the condition for the energy equation to have real roots can be stated simply as γ²/∆² > 1 whenever f(c/∆, γ/∆) = [(c/∆)² − (γ/∆)² + 1]³ + 27(c/∆)²(γ/∆)² < 0. Interestingly, γ/∆ = ±1 are exactly the tangent lines of f(c/∆, γ/∆) = 0, so the condition is naturally satisfied (as shown in Fig. 3) and we reach the same conclusion as for ∆1∆2 > 0.

Finally, we consider two further special cases, γ = 0 and ξ = 0. The energy spectrum is entirely complex only when δ = 0, c(∆1∆2 − γ²) = 0, (∆1∆2 + γ²)(2c² + ∆1∆2 + γ²) = 0 and c² + 2(∆1∆2 + γ²) < 0. For c ≠ 0 and ∆1∆2 ≠ 0, these conditions cannot be satisfied simultaneously. In a word, the system always has real eigenenergies.

These results on the nature of the eigenenergies can be explained by the symmetry associated with the different types of nonreciprocity. For the in-phase tunneling case ∆1∆2 > 0, the symmetry of the system is unbroken since the system can be transformed into a Hermitian one with \hat{S}; real eigenenergies are therefore guaranteed. This is not a necessary result for the anti-phase case ∆1∆2 < 0: although the nonlinearity c makes the EPs disappear in the evolution of E_n, the eigenvalues of one energy state remain complex. The two cases therefore inevitably affect the evolution of the states differently, so we next analyze the dynamical evolution of the two cases based on the method of the projective Hilbert space.

FIG. 4. The interference patterns of the population probability |α1|² at time t = 50/∆ as a function of ϵ0/∆ and ω/∆ for the initial state (α1(0), β1(0)) = (0, 1), (α2(0), β2(0)) = (0, 1) with (a) c/∆ = 0, ∆1∆2 > 0, (b) c/∆ = 1.05, ∆1∆2 > 0, (c) c/∆ = 0, ∆1∆2 < 0, and (d) c/∆ = 1.05, ∆1∆2 < 0. The other parameters are chosen as k = 2, A/∆ = 2.5. The white area is singular, where |α1|² tends to infinity.

III. NONLINEAR NON-HERMITIAN LZSM INTERFEROMETRY

In the nonlinear Hermitian LZ system, the LZSM interference patterns can be destructive or constructive, as determined by the Stückelberg phases, and the nonlinearity can strongly change the features of the LZSM interferometry. As shown in Fig. 4, the interference pattern of |α1|² is axisymmetric in the linear in-phase tunneling case (c = 0, ∆1∆2 > 0). In the nonlinear case (c ≠ 0), the symmetry of the interference pattern is destroyed (as shown in Fig. 4b).
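To illustrate how interference patterns like those in Fig. 4 can be generated numerically, the sketch below propagates the coupled right and left amplitudes of Eqs. (7)-(8) with a fixed-step fourth-order Runge-Kutta integrator and records |α1|² at the final time for one (ϵ0, ω) point; scanning a grid of such points yields the pattern. This is a minimal sketch of the paper's mean-field equations with arbitrary placeholder parameters, not the authors' code.

    import numpy as np

    def hamiltonian(t, psi_r, psi_l, A, omega, eps0, c, d1, d2):
        """Nonlinear nonreciprocal Hamiltonian of Eq. (7)."""
        a1, b1 = psi_r
        a2, b2 = psi_l
        gamma = A * np.sin(omega * t) + eps0
        z = b1 * np.conj(b2) - a1 * np.conj(a2)      # nonlinear mean-field term
        diag = 0.5 * (gamma + c * z)
        return np.array([[diag, 0.5 * d1],
                         [0.5 * d2, -diag]], dtype=complex)

    def rhs(t, y, pars):
        """y = (alpha1, beta1, alpha2, beta2); Eq. (8):
        i d/dt psi_r = H psi_r and i d/dt psi_l = H^dagger psi_l."""
        psi_r, psi_l = y[:2], y[2:]
        H = hamiltonian(t, psi_r, psi_l, *pars)
        return np.concatenate((-1j * H @ psi_r, -1j * H.conj().T @ psi_l))

    def evolve(pars, t_final, dt=1e-3):
        """Fixed-step RK4 propagation from the initial state (0, 1) for both states."""
        y = np.array([0, 1, 0, 1], dtype=complex)
        t = 0.0
        while t < t_final:
            k1 = rhs(t, y, pars)
            k2 = rhs(t + dt / 2, y + dt / 2 * k1, pars)
            k3 = rhs(t + dt / 2, y + dt / 2 * k2, pars)
            k4 = rhs(t + dt, y + dt * k3, pars)
            y = y + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
            t += dt
        return y

    # One pixel of an interference pattern: |alpha1|^2 at t = 50/Delta,
    # with (A, omega, eps0, c, Delta1, Delta2) chosen as placeholders.
    pars = (2.5, 1.0, 3.0, 1.05, 1.0, 1.0)
    y = evolve(pars, t_final=50.0)
    print("|alpha1|^2 =", abs(y[0]) ** 2)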
When c = 0 and ∆1∆2 < 0, the EPs make the interference patterns divergent and form a singular region (the white area in Fig. 4c). This makes it hard to study the influence of each parameter on the features of the LZSM interferometry. We therefore introduce the projective Hilbert space (see Appendix C for details) and examine the effect of the nonreciprocity k.

From Eqs. (8), without loss of generality, the quantum state |ψ^r⟩ can be written as

|\psi^r\rangle = e^{\mu(t) + i\nu(t)}|\tilde{\psi}\rangle = e^{\mu(t) + i\nu(t)}\begin{pmatrix}\tilde{a}\\ \tilde{b}\end{pmatrix},    (15)

with the normalization relation \langle\tilde{\psi}|\tilde{\psi}\rangle = 1 (µ and ν are two real parameters), where |\tilde{\psi}\rangle = (\tilde{a}, \tilde{b})^T is the quantum state in the projective Hilbert space. We then plot the normalized interference patterns of |ã|² = |α1|²/(|α1|² + |β1|²) (see Fig. 5). Compared with |α1|², the dependence of the |ã|² interference pattern on the parameters emerges clearly even when c = 0. This is because the LZSM interference is determined by the Stückelberg phases, and the phases accumulated during the evolution are retained in the projective quantum states |ψ̃⟩ once the divergence caused by the non-Hermitian factor e^{µ(t)} is removed. In Fig. 5, when c = 0, the populations of the corresponding projective quantum states in the singular region are limited to values set by the nonreciprocity k. To further reveal the influence of the parameter k, we first consider the simplest case c = 0 and then analyze the case c ≠ 0. Finally, we demonstrate the validity and accuracy of the proposed method and of the numerical results in the weak-coupling limit.

FIG. 5. The interference patterns of the projective state population probability |ã|² at time t = 50/∆ as a function of ϵ0/∆ and ω/∆ for the initial state (α1(t0), β1(t0)) = (0, 1), (α2(t0), β2(t0)) = (0, 1) in the anti-phase tunneling case ∆1∆2 < 0 with (a) c/∆ = 0, k = 2, (b) c/∆ = 1.05, k = 2, (c) c/∆ = 0, k = 1/2, and (d) c/∆ = 1.05, k = 1/2.

A. The effect of nonreciprocity and the projective quantum states in the linear non-Hermitian system

Assuming c = 0, the Hamiltonian of the system (7) becomes

\hat{H}_{mF} = \begin{pmatrix}\frac{\gamma}{2} & \frac{\Delta_1}{2}\\ \frac{\Delta_2}{2} & -\frac{\gamma}{2}\end{pmatrix},    (16)

where ∆1∆2 < 0. Using the quantum state |\psi^r\rangle = e^{\mu + i\nu}|\tilde{\psi}\rangle = e^{\mu + i\nu}(\tilde{a}, \tilde{b})^T and Eq. (8), one obtains

\dot{\mu} = -\frac{i}{2}\langle\tilde{\psi}|\hat{H} - \hat{H}^\dagger|\tilde{\psi}\rangle, \qquad \dot{\nu} = -\frac{1}{2}\langle\tilde{\psi}|\hat{H} + \hat{H}^\dagger|\tilde{\psi}\rangle + i\langle\tilde{\psi}|\dot{\tilde{\psi}}\rangle.    (17)

Substituting Eq. (17) and the definition |\tilde{\psi}\rangle = (\tilde{a}, \tilde{b})^T \equiv (\sin\frac{\theta}{2}e^{i\phi}, \cos\frac{\theta}{2})^T into Eq. (8), we have (see Appendix C for
+(18) +For ϵ0 = 0, when the time is long enough, the projective state +will always be on a certain circle (˙θ = 0) of the Bloch sphere +(see Fig.6). By Eq. (18), we can get the equation of the circle +where the projective quantum state finally lies. surprisingly, +we find the correlation between k and θ0 = limt→∞ θ as +k2 = tan2 θ0 +2 . +(19) +Therefore, in combination with Fig.5, we can explain why +|˜a|2 is limited to a certain value in the singular region. +B. +The influence of interaction and non-Hermitian on +population in the projective Hilbert space +In the nonlinear Hermitian system[33], i.e ∆ = ∆1 = ∆2, +when ϵ0 = 0 and A ≪ ω, the population of the system will +have the self-trapping phase transition and the Josephson os- +cillation under the different nonlinearities, and the boundary +line is c/∆ = 2[67, 69]. Based on this, we next study the non- +linear non-Hermitian LZSM interference patterns for ϵ0 = 0 +with different nonlinearities c, non-Hermitian parameters k +and mean amplitudes ∆ [see Fig.7 and Fig.9]. +Firstly, we consider the in-phase tunneling case ∆1∆2 > 0, +where the symmetry of the system is unbroken. For the Her- +mitian Hamiltonian ˆHh, near the boundary of two different os- +cillations, the maximum population of the self-trapping region +is 0.5, and then the amplitude gradually decreases with the in- +crease of c/∆. The populations of the state for non-Hermitian +FIG. 7. The nonlinear non-Hermitian LZSM interference patterns +with different nonlinearities (a) k = 2, and (b) k = 1/2 for weak +driving at ϵ0 = 0 and the in-phase tunneling case ∆1∆2 > 0: the +projective population |˜a|2 as a function of ∆/ω and c/ω for A/ω = +0.05 from the initial time t0 = 0 to t = 2π/ω , The red dashed-dotted +line (with slope 1/2) is plotted to denote the boundary between the +different oscillations. +Hamiltonian ˆH with ∆1 � ∆2 is only different from those for +the Hermitian Hamiltonian ˆHh in a weight of k as shown in +Eq. (13). Therefore, we can get |˜a|2 = k2|˜b|2 at the boundary +similar with the Hermitian case. Therefore, the boundary line +c/∆ = 2 (red dashed line in Fig.7) between the two regions +(self-trapping and Josephson oscillation) is the same as that in +the Hermitian system. The amplitude of the population of the +projective quantum state is determined by the nonreciprocal k +as show in Fig.7(a) and (b). Then, we consider the dynamical +evolution of the projective quantum state near the boundary, +by Eq. (8) and (15), one can obtain +˙θr =ImA sin θr − ∆1 sin ϕr cos2 θr +2 − ∆2 sin ϕr sin2 θr +2 , +˙ϕr = − γ − ReA − ∆1 +2 cot θr +2 cos ϕr + ∆2 +2 tan θr +2 cos ϕr, +˙µr = − ImA +2 +cos θr + ∆2 − ∆1 +4 +sin θr sin ϕr, +˙νr =γ +2 + ReA +2 +− ∆2 +2 tan θr +2 cos ϕr. +(20) +with the right quantum state |ψr⟩ = +� +α1 +β1 +� += eµr+iνr � ˜a +˜b +� += +eµr+iνr � +sin θr +2 eiϕr +cos θr +2 +� +, and +˙θl = − ImA sin θl − ∆2 sin ϕl cos2 θl +2 − ∆1 sin ϕl sin2 θl +2 , +˙ϕl = − γ − ReA − ∆2 +2 cot θl +2 cos ϕl + ∆1 +2 tan θl +2 cos ϕl, +˙µl =ImA +2 +cos θl + ∆1 − ∆2 +4 +sin θl sin ϕl, +˙νl =γ +2 + ReA +2 +− ∆1 +2 tan θl +2 cos ϕl. +(21) +with the left quantum state |ψl⟩ = +� +α2 +β2 +� += eµl+iνl � ˜al +˜bl +� += +eµl+iνl � +sin θl +2 eiϕl +cos θl +2 +� +, where A ≡ c(α1α∗ +2 − β1β∗ +2). By numerical +simulation, we give the dynamical evolution of the projective +right state on the Bloch sphere near the boundary c/∆ = 2 in +Fig.8. + +1/2XZ +(a) k=2 +(b) k=13 +5 +(a) k=2 +0 +0 +50.5 +0 +01013 +5 +(b) k=1/2 +0 +0 +50.5 +0 +0106 +FIG. 8. 
The dynamics of the projective states represented by the trajectories in spherical coordinates (θ, φ) on the Bloch sphere in the in-phase tunneling case ∆₁∆₂ > 0 with different strengths of nonlinearity and nonreciprocity: (a) c/∆ = 1.9, k = 2, (b) c/∆ = 2, k = 2, (c) c/∆ = 2.1, k = 2, (d) c/∆ = 1.9, k = 1/2, (e) c/∆ = 2, k = 1/2, and (f) c/∆ = 2.1, k = 1/2. The other parameters are chosen as A/ω = 0.05, ϵ₀ = 3, and the initial state is (ã, b̃) = (0, 1). The z-axis coordinates of the red dashed circle on the Bloch sphere are z₀ = cos θ₀ = (1 − k²)/(1 + k²), and the z-axis coordinates of the green dashed circle on the Bloch sphere are z′₀ = 0.

When c/∆ > 2, the projective states can only evolve on the surface of the Bloch sphere above the red dashed circle, as shown in Fig. 8(b), (c), (e) and (f). The red circle represents the projective states whose relative population difference |b̃|² − |ã|² equals (1 − k²)/(k² + 1) = cos θ₀. By |ã|² = k²|b̃|² and the normalization condition, cos θ₀ = |b̃|² − |ã|² labels the boundary between the self-trapping region and the Josephson oscillation region. As discussed before, the nonreciprocity k does not affect the constructive and destructive interference, but it does affect the relative population difference of the state. When k is larger, the relative population difference at the boundary between the two regions is smaller [see the red circles in Fig. 8(a-c) and (d-f)] and the projective population probability |ã|² is smaller [see Fig. 7(a) and (b)]. For the anti-phase tunneling case ∆₁∆₂ < 0, because of the existence of EPs in the linear case c = 0, the projective quantum state reaches the self-trapping region no matter how weak the nonlinearity is. The trajectories of the projective states on the Bloch sphere always stay above the red dashed circles, which label the boundaries between the self-trapping region and the Josephson oscillation region, as shown in Fig. 9. The maximum population of the projective quantum state is still affected by the nonreciprocity k, as shown in Eq. (19) and Fig. 10(a-d).

FIG. 9. The nonlinear non-Hermitian LZSM interference patterns with different nonlinearities (a) k = 2, and (b) k = 1/2 for weak driving at ϵ₀ = 0 and the anti-phase tunneling case ∆₁∆₂ < 0: the projective population |ã|² as a function of ∆/ω and c/ω for A/ω = 0.05 from the initial time t₀ = 0 to t = 2π/ω.

FIG. 10. The dynamics of the projective states represented by the trajectories in spherical coordinates (θ, φ) on the Bloch sphere in the anti-phase tunneling case ∆₁∆₂ < 0 with different strengths of nonlinearity and nonreciprocity: (a) c/∆ = 0.1, k = 2, (b) c/∆ = 1, k = 2, (c) c/∆ = 0.1, k = 1/2, and (d) c/∆ = 1, k = 1/2. The other parameters are chosen as A/ω = 0.05, ϵ₀ = 3, and the initial state is (ã, b̃) = (0, 1). The z-axis coordinates of the red dashed circle on the Bloch sphere are z₀ = cos θ₀ = (1 − k²)/(1 + k²), and the z-axis coordinates of the green dashed circle on the Bloch sphere are z′₀ = 0.

Comparing Fig. 10(b) and (d) with Fig. 10(a) and (c), it is easy to see that the stronger the nonlinearity, the stronger the self-trapping effect.

C. Weak-coupling limit of the projective quantum states: ∆ ≪ ω

In the weak-coupling limit, transitions between the adiabatic energy levels are difficult in the near-degenerate region.
However, in this approximation, we only assume that |ã_g(t)|² ∼ |ã_g(t₀)|² and |b̃_g(t)|² ∼ |b̃_g(t₀)|², where g = r, l. Assuming that the initial condition is (ã_g(t₀), b̃_g(t₀)) = (0, 1), the quantum state can always be written in the following form:

|ψ_g(t)⟩ = e^(µ_g(t)+iν_g(t)) (0, 1)ᵀ,   (22)

FIG. 11. Time evolution of the projective population probability |ã|² for weak coupling in the in-phase tunneling case ∆₁∆₂ > 0, with different nonlinearities: (a) c/ω = 0, k = 2, (b) c/ω = 0.5, k = 2 and (c) c/ω = 1, k = 2; (d) c/ω = 0, k = 1/2, (e) c/ω = 0.5, k = 1/2 and (f) c/ω = 1, k = 1/2. The exact and approximate solutions are compared in each panel. The other parameters are A/ω = 10.5, ∆/ω = 0.05, and ϵ₀/ω = 3.

FIG. 12. Time evolution of the projective quantum state population probability |ã|² for weak coupling in the anti-phase tunneling case ∆₁∆₂ < 0, with different nonlinearities: (a) c/ω = 0, k = 2 and (b) c/ω = 0.5, k = 2; (c) c/ω = 0, k = 1/2 and (d) c/ω = 0.5, k = 1/2. The exact and approximate solutions are compared in each panel. The other parameters are A/ω = 10.5, ∆/ω = 0.05, and ϵ₀/ω = 3.

where g = r, l. By Eqs. (8), (17) and (22), we get µ̇_r(t) + iν̇_r(t) + µ̇_l(t) − iν̇_l(t) = 0. This means

β₁(t)β₂*(t) − α₁(t)α₂*(t) ∼ β₁(t₀)β₂*(t₀) − α₁(t₀)α₂*(t₀).   (23)

Based on this approximation, we can transform the dynamics of the system from the Schrödinger picture to the Dirac picture by introducing the gauge transformation φ_r(t) = U(t)ϕ_r(t), with U(t) = ϵ₀t/2 − A cos(ωt)/(2ω) + (c/2)(β₁β₂* − α₁α₂*) and ϕ_r(t) = [α̃₁, β̃₁]ᵀ [33]. Under the new basis, the nonlinear dynamical equations (8) become (assuming ∆₁ > 0)

i ∂/∂t (α̃₁, β̃₁)ᵀ = [[0, kΩ], [(−1)^j Ω*/k, 0]] (α̃₁, β̃₁)ᵀ,   (24)

and

i ∂/∂t (α̃₂, β̃₂)ᵀ = [[0, (−1)^j Ω*/k], [kΩ, 0]] (α̃₂, β̃₂)ᵀ,   (25)

with

Ω = (∆/2) e^(iΦ(t)),  Φ(t) = ϵ₀t − A cos(ωt)/ω + ct,   (26)

and j = 1, 2 corresponding to the anti-phase case ∆₂ < 0 and the in-phase case ∆₂ > 0, respectively. Ω denotes the field-induced Rabi frequency, and Φ(t) is the relative phase of the two diabatic energy levels. The nonreciprocity k in front of Ω corresponds to the weight of the populations of the projective quantum state. Thus, we can understand why the maximum value of the population in the self-trapping region changes with k² in the in-phase case ∆₁∆₂ > 0. In a full cycle, Φ(t) can be approximately written as

Φ(t) ≃ ∫_(t₁)^(t₃) (ϵ₀ + c − nω) dt = (2π/ω)(ϵ₀ + c − nω)   (27)

with n = 0, ±1, ±2, .... When Φ_m = 2mπ, i.e. c + ϵ₀ ≃ (n + m)ω = dω (m, d = 0, ±1, ±2, ...), the patterns are constructive, while the patterns are destructive when Φ_m = (2m + 1/2)π. By solving the nonlinear equation (8) and the linear equation (24), we obtain the exact solution and the approximate solution, respectively. In Fig. 11, we show multi-period LZSM interference fringes with different characteristics in the in-phase tunneling case ∆₂ > 0. When c = 0, 1, i.e., Φ_m = 2mπ, the patterns are constructive, and when c = 0.5, 1.5, i.e., Φ_m = (2m + 1/2)π, the patterns are destructive.
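To make the comparison between the exact dynamics of Eq. (8) and the weak-coupling approximation of Eq. (24) concrete, the sketch below integrates both with a fixed-step RK4 scheme and compares the projective population |ã|², in the spirit of Fig. 11. The parameter values, time span, and helper names (rk4, exact_rhs, approx_rhs) are illustrative assumptions, not the numerical setup used for the figures.

import numpy as np

# Illustrative parameters in units of omega (assumed, mirroring the Fig. 11 caption values):
omega = 1.0
A, Delta, eps0 = 10.5 * omega, 0.05 * omega, 3.0 * omega
k, c = 2.0, 0.5 * omega                      # c/omega = 0.5 is a destructive case
Delta1, Delta2 = k * Delta, Delta / k        # in-phase tunneling, Delta1*Delta2 > 0

def rk4(rhs, y0, ts):
    # Fixed-step RK4; returns the complex-valued solution at each time in ts.
    ys = [np.array(y0, dtype=complex)]
    for t0, t1 in zip(ts[:-1], ts[1:]):
        h, y = t1 - t0, ys[-1]
        k1 = rhs(t0, y)
        k2 = rhs(t0 + h / 2, y + h / 2 * k1)
        k3 = rhs(t0 + h / 2, y + h / 2 * k2)
        k4 = rhs(t0 + h, y + h * k3)
        ys.append(y + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4))
    return np.array(ys)

def exact_rhs(t, y):
    # Nonlinear mean-field equations (8) for (alpha_1, beta_1, alpha_2, beta_2).
    a1, b1, a2, b2 = y
    gamma = A * np.sin(omega * t) + eps0
    F = gamma / 2 + (c / 2) * (b1 * np.conj(b2) - a1 * np.conj(a2))
    return np.array([-1j * (F * a1 + Delta1 / 2 * b1),
                     -1j * (Delta2 / 2 * a1 - F * b1),
                     -1j * (np.conj(F) * a2 + Delta2 / 2 * b2),
                     -1j * (Delta1 / 2 * a2 - np.conj(F) * b2)])

def approx_rhs(t, y):
    # Linearized equations (24) in the Dirac-picture frame; j = 2 for the in-phase case.
    a, b = y
    Omega = Delta / 2 * np.exp(1j * (eps0 * t - A * np.cos(omega * t) / omega + c * t))
    return np.array([-1j * k * Omega * b, -1j * np.conj(Omega) / k * a])

ts = np.linspace(0.0, 10 / omega, 20001)          # a few drive periods, assumed time span
sol_exact = rk4(exact_rhs, [0, 1, 0, 1], ts)
sol_approx = rk4(approx_rhs, [0, 1], ts)

pop_exact = np.abs(sol_exact[:, 0])**2 / (np.abs(sol_exact[:, 0])**2 + np.abs(sol_exact[:, 1])**2)
pop_approx = np.abs(sol_approx[:, 0])**2 / (np.abs(sol_approx[:, 0])**2 + np.abs(sol_approx[:, 1])**2)
print("max projective population |a~|^2 (exact, approximate):", pop_exact.max(), pop_approx.max())

Since the gauge transformation U(t) only contributes a phase, the projective population computed from (α̃₁, β̃₁) can be compared directly with the one obtained from the exact amplitudes.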
In all the nonlinear cases considered, the exact and approximate results are consistent. In Fig. 12, we show the anti-phase tunneling case ∆₂ < 0. As in the in-phase tunneling case, the conditions for constructive and destructive interference depend only on the nonlinearity (through the phase Φ_m), and the nonreciprocity k only affects the maximal value of the projective population probability |ã|².

IV. CONCLUSION

In this work, we have studied non-Hermitian nonlinear LZSM interferometry in which the non-Hermiticity comes from the nonreciprocal tunneling between the bosons. By using the mean-field approximation and the projective Hilbert space, the effects of nonreciprocity and nonlinearity on the energy spectrum, the dynamics, and the formation of the interference fringes have been studied. The results show that the different types of nonreciprocal tunneling correspond to different symmetries of the system. For the in-phase tunneling case ∆₁∆₂ > 0, the system can be transformed into a Hermitian one with a nonunitary transformation; it has the same energy spectrum and the same boundary between the Josephson region and the self-trapping region as the Hermitian system. This is not a necessary result for the anti-phase case ∆₁∆₂ < 0: the EPs can only exist in its linear case c = 0, and the eigenvalues of one energy state become complex in the nonlinear case. There is only a self-trapping region in this case, since the evolution of the projective states always stays above the boundary when the nonlinearity exists. For the LZSM interferometry, the strength of the nonreciprocity k is found to play an essential role in the population of the projective state and to determine the maximal values and strengths of the interference patterns in the projective space. Finally, under the weak-coupling approximation, we found that the type and strength of the nonreciprocity do not affect the conditions for destructive and constructive interference; these depend only on the strength of the nonlinearity. Our result provides a possible way to study the parameters of a non-Hermitian nonlinear two-level system and its related external fields by LZSM interferometry.

ACKNOWLEDGMENTS

We thank S. C. Li and F. Q. Dou for their helpful discussions. This work is supported by the National Natural Science Foundation of China (NSFC) (Grants Nos. 11875103, 12147206, 11725417, 12088101, 12047548, and U1930403), and the Science Challenge Project (Grant No. TZ2018005).

Appendix A: Semi-classical Hamiltonian

In the non-Hermitian system, let Ĥ be a non-Hermitian Hamiltonian with a complete biorthonormal eigenbasis {|ψ^r_n⟩, |ψ^l_n⟩}; the orthonormality of the quantum states reads

⟨ψ^r_n|ψ^l_m⟩ = δ_nm.   (A1)

Similarly, for system (1), in the mean-field approximation, the coherent states should be written as

|Ψ^r_sc⟩ = (1/√N!) (α₁â† + β₁b̂†)^N |∅⟩,   (A2)

|Ψ^l_sc⟩ = (1/√N!) (α₂â† + β₂b̂†)^N |∅⟩,   (A3)

According to the normalization condition ⟨Ψ^l_sc|Ψ^r_sc⟩ = 1:

α₁α₂* + β₁β₂* = 1.   (A4)

Then, applying the Hamiltonian of system (1) to the right quantum state |Ψ^r_sc⟩, one can obtain

Ĥ|Ψ^r_sc⟩ = [ (γ/2)(â†â − b̂†b̂) + (∆₂/2) â†b̂ + (∆₁/2) â b̂† − (c/4N)(â†â − b̂†b̂)² ] (1/√N!) Σ_(r=0)^(N) C^r_N (α₁â†)^(N−r) (β₁b̂†)^r |∅⟩,   (A5)

When calculating the expectation value of an observable, the quantum states of the system are normalized. So in system (1), the expectation value of Ĥ₀ should be written as

⟨Ψ^l_sc|Ĥ₀|Ψ^r_sc⟩ = (Nγ/2) Σ_(r=0)^(N) [(N − 1)!/((N − r − 1)! r!)] (α₁α₂*)^(N−r−1)(β₁β₂*)^r α₁α₂* − (Nγ/2) Σ_(r=0)^(N) (N − 1)!/
+(N − r)!(r − 1)!(α1α∗ +2)N−r(β1β∗ +2)r−1β1β∗ +2 ++N(∆2 +2 +N +� +r=0 +Cr +N−1(N − r)(α1α∗ +2)N−r−1(β1β∗ +2)rα∗ +2β1 + ∆1 +2 +N +� +r=0 +Cr−1 +N−1r(α1α∗ +2)N−r(β1β∗ +2)r−1α1β∗ +2) ++ +N +� +r=0 +Cr−1 +N−1r(α1α∗ +2)N−r(β1β∗ +2)r−1α1β∗ +2) − cN +4 (β1β∗ +2 − α1α∗ +2)2 +=Nγ +2 (α1α∗ +2 − β1β∗ +2) + N∆2 +2 (α∗ +2β1) + N∆1 +2 (α1β∗ +2) − cN +4 (β1β∗ +2 − α1α∗ +2)2, +(A6) +The expectation value of each particle is +ˆHM = ⟨Ψl +sc| ˆH0|Ψr +sc⟩ +N += −c +4(β1β∗ +2 − α1α∗ +2)2 + ∆2 +2 (α∗ +2β1) + ∆2 +2 (α1β∗ +2) + γ +2(α1α∗ +2 − β1β∗ +2). +(A7) + +9 +Appendix B: Derivation of the Energy level equation +In the non-Hermitian system, the Hamiltonian ˆH has a complete biorthonormal eigenbasis {|ψr +n⟩, |ψl +n⟩} of satisfying +ˆH|φr +n⟩ = En|φr +n⟩, +(B1) +ˆH†|φl +n⟩ = E∗ +n|φl +n⟩, +(B2) +⟨φl +m|φr +n⟩ = δmn, +(n = 1, 2, ...) +(B3) +By equations (B1), we can naturally conclude that the adiabatic basis of the system (7) satisfies +Fα1 + i∆ +2 β1 = Eα1, +i∆ +2 α1 − Fβ1 = Eβ1, +(B4) +F∗α2 − i∆ +2 β2 = E∗α1, +− i∆ +2 α2 − F∗β2 = E∗β2, +(B5) +α1α∗ +2 + β1β∗ +2 = 1. +(B6) +where F ≡ γ +2 + c +2(β1β∗ +2 − α1α∗ +2). To derive non-trivial solutions of Eqs. (B1) and (B2), we must ensure that | ˆH − E ˆI| = 0 and +| ˆH† − E∗ ˆI| = 0 (ˆI is an identity matrix). Namely, +E2 − F2 + ∆2 +4 = 0, +(B7) +E∗2 − F∗2 + ∆2 +4 = 0, +(B8) +By (B4) and the complex conjugate of Eq. (B5), we have +α1α∗ +2 +β1β∗ +2 += −4(E + F)2 +∆2 +, +(B9) +By the normalization (B6) and Eq. (B7), it becomes +β1β∗ +2 = E − F +2E +, +(B10) +Therefore, +F ≡ γ +2 + c +2(β1β∗ +2 − α1α∗ +2) = γ +2 − cF +2E . +(B11) +Substitute Eq. (B11) into Eq. (B7), we finally have +E4 + cE3 + 1 +4(c2 − γ2 + ∆2)E2 + c∆2 +4 E + ∆2c2 +16 += 0. +(B12) +Appendix C: The projective space for non-Hermitian quantum system +Consider the following Schr¨odinger equation +i d +dt|ψ(t)⟩ = ˆH|ψ(t)⟩, +(C1) + +10 +where ˆH is generally a non-Hermitian Hamiltonian. Let us define |ψ(t)⟩ = eµ+iν| ˜ψ(t)⟩ with the normalization relation ⟨ ˜ψ(t)| ˜ψ(t)⟩ = +1 (µ and ν are two real parameters). From Eq. (C1) and its Hermitian conjugation, one can get +˙µ = − i +2⟨ ˜ψ| ˆH − ˆH†| ˜ψ⟩, +(C2) +and +˙ν = −1 +2⟨ ˜ψ| ˆH + ˆH†| ˜ψ⟩ + i⟨ ˜ψ| ˙˜ψ⟩. +(C3) +One has to keep mind that the above deduction is some different from what had been done by using adjoint equation of (C1). +In quantum theory with Hermitian Hamiltonian systems, |ψ(t)⟩ and | ˜ψ(t)⟩ are equivalence, since the time evolution is unitary +(probability preserving) and they are only different in a global phase. Under this equivalence, | ˜ψ(t)⟩ can be employed as a vector +on so-called projective Hilbert space of the system. However, for a system with a non-Hermitian Hamiltonian, the time evolution +is not unitary. Hence, though the state vectors only differ in norms, they may describe different system states. Nevertheless, we +can still formally set up the projective Hilbert space for a non-Hermitian system by using | ˜ψ(t)⟩ as a state on it. +Based on the above definition, from Eqs. (C2) and (C3), we can see that one can obtain the norm increment and the global +phase of the state acquiring in its time evolution only from the trace in the projective space, the latter is as the same as for +Hermitian systems. The global phase and its relation with the projective Hilbert space plays significant role in geometric +(topology) properties of Hermitian quantum systems. Therefore, it may be interesting to study the geometric properties of a +non-Hermitian system in such a point of view. 
+In order to show such discussions clearly, we employ a two-level system, describing physics of two coupled sites with gain +and loss, of which the counterpart Hermitian system also plays a role in illustrating the geometric properties of quantum systems. +The time evolution of such a two-level system is described by a 2 × 2 matrix Hamiltonian system by the following equation, +i d +dt +� +a +b +� += +� +H11 H12 +H21 H22 +� � +a +b +� +, +(C4) +Then following the definition |ψ(t)⟩ = eµ+iν| ˜ψ(t)⟩, one can get +d +dt(iµ − ν)˜a + i d +dt ˜a = H11˜a + H12˜b, +(C5) +d +dt(iµ − ν)˜b + i d +dt +˜b = H21˜a + H22˜b, +(C6) +Combining with their complex conjugations, and considering |˜a|2 + |˜b|2 = 1, we can easily verify the equations (C2) and (C3). +For convenience and without losing generality, we then construct the vector in the projective space for a state |ψ(t)⟩ = +� +a +b +� +with | ˜ψ(t)⟩ = +� ˜aeiϕ +˜b +� +, ˜a = +a +√ +|a|2+|b|2 , ˜b = +b +√ +|a|2+|b|2 , and ϕ = arg(a) − arg(b). By denoting z = |b|2 − |a|2 which is just the relative +population difference of the two levels, it then can be mapped to a sphere, the so-called Bloch sphere, with the coordinates (ϕ, z). +From Eq. (C3), we can obtain the evolution of the total phase +d +dtβ = −1/2⟨ ˜ψ| ˆH + ˆH†| ˜ψ⟩ + 1/2(1 − z)dϕ +dt . +(C7) +This equation is the same as what had been obtained for Hermitian systems by Aharonov and Anandan excepting that in the +dynamic part Hermitian Hamiltonian ˆH is replaced by ( ˆH + ˆH†)/2. The second part in the right hand of the above equation +is known as the geometric part. One can easily prove that, if the trace of the evolution is closed in the projective space, the +geometric phase just equals to the half of solid angle of the close path on the Bloch sphere, which is just the so-called AA phase, +the geometric phase of cyclic state. +[1] L. D. Landau, Phys. Z. Sowjetunion 2 , 46 (1932). +[2] C. Zener and R. H. Fowler, Proc. R. Soc. Lond. A 137, 696 + +11 +(1932). +[3] E. C. G. Stueckelberg, Helv. Phys Acta 5, 369 (1932). +[4] L. D. Landau, Phys. Z. Sowjetunion 1 , 88 (1932). +[5] S. Shevchenko, S. Ashhab, and F. Nori, Physics Reports 492, 1 +(2010). +[6] B. T. Torosov and N. V. Vitanov, Phys. Rev. A 96, 013845 +(2017). +[7] G. Cao, H. O. Li, T. Tu, L. Wang, C. Zhou, M. Xiao, G. C. Guo, +H. W. Jiang, and G. P. Guo, Nat. Commun. 4, 1401 (2013). +[8] F. Forster, G. Petersen, S. Manus, P. H¨anggi, D. Schuh, +W. Wegscheider, S. Kohler, and S. Ludwig, Phys. Rev. Lett. +112, 116803 (2014). +[9] P. F¨oldi, M. G. Benedict, J. M. Pereira, and F. M. Peeters, Phys. +Rev. B 75, 104430 (2007). +[10] C. Calero, E. M. Chudnovsky, and D. A. Garanin, Phys. Rev. +B 72, 024409 (2005). +[11] B. K. Cooper and V. M. Yakovenko, Phys. Rev. Lett. 96, 037001 +(2006). +[12] A. Banerjee and V. M. Yakovenko, Phys. Rev. B 78, 125404 +(2008). +[13] M. Mark, T. Kraemer, P. Waldburger, J. Herbig, C. Chin, H.-C. +N¨agerl, and R. Grimm, Phys. Rev. Lett. 99, 113201 (2007). +[14] L. Du, M. Wang, and Y. Yu, Phys. Rev. B 82, 045128 (2010). +[15] Q. Niu, X.-G. Zhao, G. A. Georgakis, and M. G. Raizen, Phys. +Rev. Lett. 76, 4504 (1996). +[16] O. Morsch, J. H. M¨uller, M. Cristiani, D. Ciampini, and E. Ari- +mondo, Phys. Rev. Lett. 87, 140402 (2001). +[17] Y. A. Chen, S. D. Huber, S. Trotzky, I. Bloch, and E. Altman, +Nat. Phys. 7, 61 (2011). +[18] M. Cristiani, O. Morsch, J. H. M¨uller, D. Ciampini, and E. Ari- +mondo, Phys. Rev. A 65, 063612 (2002). +[19] Q. Zhang, P. H¨anggi, and J. Gong, Phys. Rev. A 77, 053607 +(2008). +[20] C. S. E. 
van Ditzhuijzen, A. Tauschinsky, and H. B. van Linden +van den Heuvell, Phys. Rev. A 80, 063407 (2009). +[21] J. Liu, L. Fu, B.-Y. Y. Ou, S.-G. G. Chen, D.-i. I. Choi, B. Wu, +and Q. Niu, Phys. Rev. A 66, 1 (2002), 0105140. +[22] S.-C. Li, L.-B. Fu, W.-S. Duan, and J. Liu, Phys. Rev. A 78, +063621 (2008). +[23] L.-B. Fu, D.-F. Ye, C. Lee, W. Zhang, and J. Liu, Phys. Rev. A +80, 013619 (2009). +[24] D.-F. Ye, L.-B. Fu, and J. Liu, Phys. Rev. A 77, 013402 (2008). +[25] S.-C. Li, Journal of Physics B: Atomic, Molecular and Optical +Physics 43, 205303 (2010). +[26] S.-C. Li and L.-B. Fu, Phys. Rev. A 102, 033323 (2020); Phys. +Rev. A 101, 023618 (2020); Phys. Rev. A 102, 033313 (2020). +[27] J. Liu, L. Fu, B.-Y. Ou, S.-G. Chen, D.-I. Choi, B. Wu, and +Q. Niu, Phys. Rev. A 66, 023404 (2002). +[28] G. J. Milburn, J. Corney, E. M. Wright, and D. F. Walls, Phys. +Rev. A 55, 4318 (1997). +[29] A. Smerzi, S. Fantoni, S. Giovanazzi, and S. R. Shenoy, Phys. +Rev. Lett. 79, 4950 (1997). +[30] S. Kohler and F. Sols, Phys. Rev. Lett. 89, 060403 (2002). +[31] O. V. Ivakhnenko, S. N. Shevchenko, and F. Nori, Physics Re- +ports 995, 1 (2023). +[32] B. Wu and Q. Niu, Phys. Rev. A 61, 023402 (2000). +[33] S.-C. Li, L.-B. Fu, and J. Liu, Phys. Rev. A 98, 013601 (2018). +[34] R. El-Ganainy, K. G. Makris, M. Khajavikhan, Z. H. Mussli- +mani, S. Rotter, and D. N. Christodoulides, Nat. Phys. 14, 11 +(2018). +[35] Y. Ashida, Z. Gong, and M. Ueda, Advances in Physics 69, 249 +(2020). +[36] M. A. Miri and A. Al`u, Science 363, eaar7709 (2019). +[37] W. Zhu, X. Fang, D. Li, Y. Sun, Y. Li, Y. Jing, and H. Chen, +Phys. Rev. Lett. 121, 124501 (2018). +[38] Y. Wu, W. Liu, J. Geng, X. Song, X. Ye, C.-K. Duan, X. Rong, +and J. Du, Science 364, 878 (2019). +[39] J. Li, A. K. Harter, J. Liu, L. de Melo, Y. N. Joglekar, and +L. Luo, Nat. Commun. 10, 855 (2019), arXiv:1608.05061. +[40] W. Xiong, Z. Li, Y. Song, J. Chen, G.-Q. Zhang, and M. Wang, +Phys. Rev. A 104, 063508 (2021). +[41] W. Xiong, Z. Li, G.-Q. Zhang, M. Wang, H.-C. Li, X.-Q. Luo, +and J. Chen, Phys. Rev. A 106, 033518 (2022). +[42] S. Yao and Z. Wang, Phys. Rev. Lett. 121, 086803 (2018). +[43] C. Yin, H. Jiang, L. Li, R. L¨u, and S. Chen, Phys. Rev. A 97, +052115 (2018). +[44] C. H. Lee and R. Thomale, Phys. Rev. B 99, 201103 (2019). +[45] L. Li, C. H. Lee, and J. Gong, Phys. Rev. Lett. 124, 250402 +(2020). +[46] X. Huang, C. Lu, C. Liang, H. Tao, and Y. C. Liu, Light Sci. +Appl. 10 (2021), 10.1038/s41377-021-00464-2. +[47] C. M. Bender and S. Boettcher, Phys. Rev. Lett. 80, 5243 +(1998). +[48] J. Wong, J. Math. Phys. 8, 2039 (1967). +[49] F. H. M. Faisal and J. V. Moloney, J. Phys. B 16, 3109 (1983). +[50] A. Mostafazadeh, J. Math. Phys. 43, 205 (2002); J. Math. Phys. +43, 2814 (2002); J. Math. Phys. 43, 3944 (2002); J. Math. Phys. +43, 6343 (2002); J. Math. Phys. 44, 974 (2003); J. Math. Phys. +45, 932 (2004); Nuclear Physics B 640, 419 (2002). +[51] C. M. Bender, K. A. Milton, and V. M. Savage, Phys. Rev. D +62, 085001 (2000). +[52] C. M. Bender, S. Boettcher, H. Jones, P. N. Meisinger, and +M. Simsek, Phys. Lett. A 291, 197 (2001). +[53] C. M. Bender, D. C. Brody, and H. F. Jones, Phys. Rev. Lett. +93, 251601 (2004); Phys. Rev. D 70, 025001 (2004). +[54] A. Mostafazadeh, Int. J. Mod. Phys. A 21, 2553 (2006). +[55] C. M. Bender, V. Branchina, and E. Messina, Phys. Rev. D 85, +085001 (2012). +[56] C. M. Bender and K. A. Milton, Phys. Rev. D 57, 3595 (1998). +[57] P. Dorey, C. Dunning, and R. Tateo, J. Phys. A 34, L391 (2001). +[58] Y.-G. Miao, H. J. 
M¨uller-Kirsten, and D. K. Park, Journal of +High Energy Physics 2003, 038 (2003). +[59] L. Jin and Z. Song, Phys. Rev. A 80, 052107 (2009); Phys. Rev. +A 85, 012111 (2012); J. Phys. A 44, 375304 (2011). +[60] F. Minganti, A. Miranowicz, R. W. Chhajlany, +and F. Nori, +Phys. Rev. A 100, 062131 (2019). +[61] B. Longstaff and E.-M. Graefe, Phys. Rev. A 100, 052119 +(2019). +[62] E. M. Graefe, H. J. Korsch, and A. E. Niederle, Phys. Rev. Lett. +101, 150408 (2008). +[63] B. Longstaff and E.-M. Graefe, Phys. Rev. A 100, 052119 +(2019). +[64] X. Shen, F. Wang, Z. Li, and Z. Wu, Phys. Rev. A 100, 062514 +(2019). +[65] W.-Y. Wang, B. Sun, and J. Liu, Phys. Rev. A 106, 063708 +(2022). +[66] S. Ib´a˜nez and J. G. Muga, Phys. Rev. A 89, 033403 (2014). +[67] H.-D. Liu, J. Fang, and T.-Y. Zheng, Commun. Theor. Phys. +68, 439 (2017). +[68] J. I. Cirac, M. Lewenstein, K. Mølmer, and P. Zoller, Phys. Rev. +A 57, 1208 (1998). +[69] W. Wang, L. B. Fu, and X. X. Yi, Phys. Rev. A 75, 045601 +(2007). + diff --git a/1dAyT4oBgHgl3EQf1fkT/content/tmp_files/load_file.txt b/1dAyT4oBgHgl3EQf1fkT/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..39ff80c701249730e0dad0c7569654b289fd3302 --- /dev/null +++ b/1dAyT4oBgHgl3EQf1fkT/content/tmp_files/load_file.txt @@ -0,0 +1,1002 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf,len=1001 +page_content='Nonlinear Non-Hermitian Landau-Zener-St¨uckelberg-Majorana interferometry Xin Wang,1 H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Liu,1, ∗ and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Fu2, † 1Center for Quantum Sciences and School of Physics, Northeast Normal University, Changchun 130024, China 2Graduate School of China Academy of Engineering Physics, No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 10 Xibeiwang East Road, Haidian District, Beijing, 100193, China (Dated: January 3, 2023) In this work, we have studied the non-Hermitian nonlinear LZSM interferometry in a non-Hermitian N-body interacting boson system in which the non-Hermicity is from the nonreciprocal tunnelings between the bosons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' By using the mean-field approximation and projective Hilbert space, the effect of nonreciprocity and nonlin- earity on the energy spectrum, the dynamics, and the formation of the interference fringes have been studied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The different symmetries and the impact of the two different types of reciprocity, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' the in-phase tunneling and anti-phase tunneling, on the energy spectrum and the phase transition between the Josephson oscillation and the self-trapping have been investigated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For the LZSM interferometry, the strength of the nonreciprocity is found to take an essential role in the population of the projective state and the strengths of the interference patterns in the projective space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' While the conditions of destructive and constructive interference under the weak-coupling approximation still only depend on the strength of nonlinearity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Our result provides an application of the non- linear non-Hermitian LZSM interferometry in studying the parameters of a non-Hermitian nonlinear two-level system which related to the nonlinearity and the non-Hermicity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' INTRODUCTION The quantum two-level system (TLS) is the most basic part of physical systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Among them, the Landau-Zener (LZ) transition between two levels at an avoided crossing [1–3] has received widespread attention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' When these two-level sys- tems are under a strong periodic driving field, a series of LZ transitions occur and the transitions probability exhibit a periodic dependence on the phase (St¨uckelberg phase) accu- mulated between transitions [1, 4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The periodic change is called Landau-Zener-St¨uckelberg-Majorana(LZSM) interfer- ometry [5, 6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' With the development of research, LZSM inter- ferometry has become an important phenomenon in quantum science and technology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' On the one hand, LZSM interfer- ometry is used for ultra-fast universal quantum control of a quantum-dot charge qubit [7] and characterized qubit dephas- ing [8], etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' On the other hand, it has involved many fields so far, such as molecular nanomagnets [9, 10], quasi-one- dimensional layered materials [11, 12], ultracold molecules [13], quantum noise [14], Bose-Einstein condensates [15–19], Rydberg atoms [20], etc.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Interestingly, if a two-level system takes account of the nonlinear interaction, it may produce un- expected interference features [21–26].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For the non-linear LZ model, the self-trapping phase transition may occur in LZSM interferometry [27–31], and there may be exceptional ring structures in the energy spectra [32, 33].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' In recent years, the non-Hermitian quantum systems with real energy spectra received widespread attention in the- ory and experiment [34–41].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' There are two kinds of non- Hermicity, asymmetric coupling strengths in nonreciprocal systems and the gain-loss in reciprocal system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' There are two kinds of non-Hermitian Hamiltonians, describing nonre- ciprocal systems with asymmetric coupling strengths [42–46] ∗ liuhd100@nenu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='cn † lbfu@gscaep.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='cn and gain-loss systems [37–41].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Bender and Boettcher dis- covered a series of parity-time (PT) -symmetric Hamiltonians [47], which could result in real energy spectra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Mostafazadeh generalized this type of Hamiltonian to a η-pseudo-Hermitian quantum theory which explains the conditions for the non- Hermitian system to have the real energy spectra (η is a pos- itive Hermitian operator) [48–50].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The theory has been ap- plied in many fields for more than ten years of development, such as quantum field theory [51–55], super-symmetric quan- tum mechanics [56, 57], non-commutative field theory [58], quantum information [59], etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Especially, there always ex- ists some exceptional points (EPs) in the real energy spec- trum of the non-Hermitian system [60, 61], at which two or more eigenstates of the system coalesce.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' These EPs of the en- ergy spectrum in the parameter space are closely related to the symmetry, topological properties, and phase transitions of the system [34–36].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Consequently, efforts have been put forward to extend the study of LZ problem to non-Hermitian system [6, 62–65].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Therefore, for non-Hermitian systems and nonlin- ear LZSM interference, it is natural to ask how will the en- ergy spectrum of the nonlinear LZ system changes if the non- Hermiticity emerges?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Will non-linearity affect EPs?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Since the populations of the bare states on the adiabatic eigenstates normally can not be normalized by a time-independent coeffi- cient [66].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Can the interesting self-trapping effect in the case of nonlinear non-Hermitian still be observed?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' We shed lights on these questions in this paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' By setting up the projec- tive Hilbert space, we show that the populations of the projec- tive quantum states can still achieve LZSM interferometry and analyzed the influence of non-Hermicity and nonlinearity on the energy spectra and the interference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Then, we discussed the influence of non-Hermitian on the self-trapping effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Fi- nally, under the weak-coupling approximation of the projec- tive quantum states, we further demonstrated the validity and accuracy of the proposed method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The structure of the paper is as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' In Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='II, we introduce a non-Hermitian N-body interacting boson system which is equivalent to a nonlinear nonreciprocal two-level arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='00734v1 [quant-ph] 2 Jan 2023 2 system with periodic driving in the mean-field approxima- tion, and discussed the energy spectrum of this two-level sys- tem, In Sec.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='III, the influence of nonlinear strength and non- Hermiticity on LZSM interferometry and the self-trapping ef- fects has been studied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Under the weak-coupling limit, the non-Hermicity does not affect the conditions of destructive interference and constructive interference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Finally, the con- clusions are summarized in Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' NONLINEAR NONHERMITIAN TWO-LEVEL MODEL The second quantized Hamiltonian of a nonreciprocal interacting-boson system is ˆH0 = γ 2(ˆa†ˆa − ˆb†ˆb) + ∆2 2 ˆa†ˆb + ∆1 2 ˆaˆb† − c 4N (ˆa†ˆa − ˆb†ˆb)2, (1) where annihilation operators ˆa, ˆb and generation operators ˆa†, ˆb† are for the different quantum states that are the left and right well in the double-well BEC system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' γ = A sin(ωt) + ϵ0 is the monochromatic driving field with amplitude A, fre- quency ω, and offset ϵ0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' c is the interaction strength between bosons, ∆i (i = 1, 2) is the tunneling amplitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' When the total number of bosons N → ∞, all particles are assumed to be in the same spin coherent state in the mean-field approx- imation [67, 68].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Considering that the quantum states of the non-Hermitian system are in a dual Hilbert space to keep the normalize condition [50], the selected coherent states need to be defined by both left and right states as |Ψr sc⟩ = 1 √ N!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (α1ˆa† + β1ˆb†)N|∅⟩, |Ψl sc⟩ = 1 √ N!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (α2ˆa† + β2ˆb†)N|∅⟩, (2) Based on this, we derive the semi-classical Hamiltonian (see Appendix.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A) ˆHM = ⟨Ψl sc| ˆH0|Ψr sc⟩ N = γ 2(α1α∗ 2 − β1β∗ 2) + ∆2 2 α∗ 2β1 + ∆1 2 α1β∗ 2 − c 4(β1β∗ 2 − α1α∗ 2)2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (3) by the dynamical evolution of the semiclassical Hamiltonian [67] i˙α1 = ∂ ˆHm ∂α∗ 2 ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' i˙β1 = ∂ ˆHm ∂β∗ 2 ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (4) we can construct the following dimensionless Schr¨odinger equation i ∂ ∂t � α1 β1 � = ˆHmF � α1 β1 � ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (5) with the MF Hamiltonian ˆHmF = � γ 2 + c 2(β1β∗ 2 − α1α∗ 2) ∆1 2 ∆2 2 − γ 2 − c 2(β1β∗ 2 − α1α∗ 2) � ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (6) t3 t1 t2 t3 t2 t1 ωt/π (b) ϵ0 = 5 (a) ϵ0 = 0 En(t) 0 1 2 3 4 6 4 2 0 2 4 0 1 2 3 4 5 0 5 En(t) c/Δ=0 c/Δ=3 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Time evolution of the energy levels for different offsets: (a) ϵ0 = 0 and (b) ϵ0 = 5, where A = 10, ω = 1 and ∆1∆2 > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The time-dependent adiabatic energy levels (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=', ∆ = 1) are shown by the red (c = 0) and black (c = 3) dashed lines, while the diabatic energy levels (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=', ∆ = 0 ) are shown by the blue (c = 0) and green (c = 3) solid lines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' and state |ψr⟩ = (α1, β1)T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Therefore, the model Hamiltonian under periodic driving can be described by a nonlinear nonre- ciprocal two-level Hamiltonian ˆH = ∆1 + ∆2 4 ˆσx+ ∆1 − ∆2 4 i ˆσy+ γ(t) + c(β1β∗ 2 − α1α∗ 2) 2 ˆσz (7) where ˆσx,y,z are the Pauli matrices, α1, α2, β1, β2 are the prob- ability amplitudes.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The dynamic equations of the system are [50] i ∂ ∂t|ψr⟩ = ˆH|ψr⟩, i ∂ ∂t|ψl⟩ = ˆH†|ψl⟩, (8) where ⟨ψl|ψr⟩ = 1 and the quantum states |ψr⟩ = α1 |↑⟩ + β1 |↓⟩ , |ψl⟩ = α2 |↑⟩ + β2| |↓⟩ (9) are represented under the diabatic basis {|↑⟩ , |↓⟩} with spin eigenstates |↑⟩ and |↓⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For the adiabatic basis, the left and right instantaneous eigenstates of the time-dependent Hamiltonian ˆH are derived by[50] ˆH|φr n⟩ = En|φr n⟩, ˆH†|φl n⟩ = E∗ n|φl n⟩, (10) where ⟨φl m|φr n⟩ = δnm (n = 1, 2), the eigenenergies En(t) are determined by the quartic equation (see Appendix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' B) E4+cE3+ 1 4(c2−γ2−∆1∆2)E2− c∆1∆2 4 E− ∆1∆2c2 16 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (11) By solving equation (11), we draw the energy spectrum of the system (7) (see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='1 and Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The two parameters ∆ ≡ � |∆1∆2|, k ≡ � |∆1/∆2| (12) 3 Ep t3 t2 t1 t1 Ep (b) ϵ0 = 5 c/Δ=0 c/Δ=3 (a) ϵ0 = 0 0 1 2 3 4 6 4 2 0 2 4 1 2 3 4 5 0 5 En(t) En(t) ωt/π t3 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Time evolution of the energy levels for different offsets: (a) ϵ0 = 0 and (b) ϵ0 = 5, where A = 10, ω = 1 and ∆1∆2 < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The time-dependent adiabatic energy levels (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=', ∆ = √|∆1∆2| = 1) are shown by the red (c = 0) and black (c = 3) dashed lines, while the diabatic energy levels (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=', ∆ = 0 ) are shown by the blue (c = 0) and green (c = 3) solid lines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' are introduced to describe the mean tunneling amplitude and the nonreciprocity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' In the in-phase tunneling case ∆1∆2 > 0 as shown in Fig.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='1, the energy spectrum of the system (7) is the same as the Her- mitian Hamiltonian ˆHh = ∆ 2 ˆσx + γ(t)+c(|β|2−|α|2) 2 ˆσz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Therefore, the Hamiltonian ˆH and quantum states |ψr⟩ of the two non- reciprocal systems can be related to the Hermitian system by following relation ˆHh = ˆS ˆH ˆS −1, |ψ⟩ = ˆS |ψr⟩ = � α1 kβ1 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' (13) where ˆS = � 1 0 0 k � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Compared with ˆHh, the nonreciproc- ity, which only affects the eigenstates of the system, neither changes the eigenvalue nor destroys the symmetry of the sys- tem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' In the anti-phase tunneling case ∆1∆2 < 0 as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='2 , the non-adiabatic energy levels have a series of de- generate points (EPs) when c = 0 (see the crossing points of red dash lines in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='2, and the imaginary parts of En are not shown).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Interestingly, when the nonlinearity is added (c � 0), the EPs disappear and the near-degenerate regions are formed (see the black dashed lines in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' When considering the offset (ϵ0 � 0), the near-degenerate regions disappear near the times t ′ n = t1+t3 2 + 2nπ ω (with n being an integer), the period changes from nπ ω to 2nπ ω , and the ring energy levels will tend to degenerate at times t1 + 2mπ ω (with m being an integer) as ϵ0 in- creases as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Obviously, the nonlinearity affects the EPs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' By equation (11), En = 0 is the root of the equation iff c∆1∆2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Therefore, the existence of c does not allow the existence of EPs in the anti-phase tunneling case ∆1∆2 < 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Next, we analyzed the cases of the existence of real roots of 0 1 2 3 4 5 c/ 4 2 1012 4 (t)/ FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Different regions for parameter space of c ∆ and γ ∆ in the anti-phase tunneling case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Region I for f( c ∆, γ ∆) < 0, Region II for γ2 ∆2 > 1 when f( c ∆, γ ∆) > 0, Region III for γ2 ∆2 < 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Naturally, when f( c ∆, γ ∆) < 0, the inequality γ2 ∆2 > 1 is guaranteed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' the energy spectrum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For the special cases c = 0, the eigenenergies of the system are ± � γ2(t) + ∆1∆2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' It is easy to find that the EPs emerge at γ2(t) = −∆1∆2 in the anti-phase tunneling case ∆1∆2 < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For c � 0, the nature (real or not) of the roots of the energy equation (11) depend on the sign of δ = −c2γ2∆1∆2ξ, (14) with ξ = ((c2 − γ2 − ∆1∆2)3 − 27c2γ2∆1∆2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' When δ > 0, there are two real roots and a pair of conjugate complex roots.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The system will always have real eigenener- gies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' When δ < 0, the equation has four unequal real roots if c2 + 2(∆1∆2 + γ2) and (∆1∆2 + γ2)(2c2 + ∆1∆2 + γ2) are both positive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Otherwise, the equation has two pairs of unequal conjugate complex roots.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Obviously, for the in-phase tunnel- ing case ∆1∆2 > 0, there always exists real eigenenergies of the system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For the anti-phase tunneling case with δ < 0, the conditions that the energy equation has real roots can be simply described as γ2 ∆2 > 1 in f( c ∆, γ ∆) = [( c ∆)2−( γ ∆)2+1]3+27( c ∆)2( γ ∆)2 < 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' In- terestingly, γ ∆ = ±1 are exactly the tangent lines of f( c ∆, γ ∆) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Therefore, the condition is naturally satisfied (as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='3), so we get the same conclusion as ∆1∆2 > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Finally, we consider another two special case: γ = 0 and ξ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' The energy spectrum are all complex only when δ = 0, c(∆1∆2 − γ2) = 0, (∆1∆2 + γ2)(2c2 + ∆1∆2 + γ2) = 0 and c2 + 2(∆1∆2 + γ2) < 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For, c � 0 and ∆1∆2 � 0, these conditions cannot be satisfied at the same time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' In a word, the system will always have real eigen energies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' These results on the nature of the eigenenergies can be ex- plained by the symmetry related to the different types of non- reciprocal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For the in-phase tunneling case ∆1∆2 > 0, the symmetry of the system is unbroken since the system can be transformed into a Hermitian one with ˆS .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Therefore, the real eigen energies are guaranteed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' While it is not a necessary re- sult for the anti-phase case ∆1∆2 < 0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Although the non- linearity c makes EPs disappear in the evolution of En, the eigenvalues of one energy state are still complex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' For these two cases, it is inevitable to have different effects on the evo- lution of states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' So next we will analyze the dynamic evolution 4 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 4.' 
FIG. 4. The interference patterns of the population probability |α1|² at time t = 50/∆ as a function of ϵ0/∆ and ω/∆ for the initial state (α1(0), β1(0)) = (0, 1), (α2(0), β2(0)) = (0, 1) with (a) c/∆ = 0, ∆1∆2 > 0, (b) c/∆ = 1.05, ∆1∆2 > 0, (c) c/∆ = 0, ∆1∆2 < 0, and (d) c/∆ = 1.05, ∆1∆2 < 0. The other parameters are chosen as k = 2 and A/∆ = 2.5. The white area is singular, where |α1|² tends to infinity.

III. NONLINEAR NON-HERMITIAN LZSM INTERFEROMETRY

In the nonlinear Hermitian LZ system, the LZSM interference patterns can be destructive or constructive, as determined by the Stückelberg phases, and the nonlinearity can strongly change the features of the LZSM interferometry. As shown in Fig. 4, the interference pattern of |α1|² is axisymmetric for the linear in-phase tunneling case (c = 0, ∆1∆2 > 0). In the nonlinear case (c ≠ 0), the symmetry of the interference pattern is destroyed (as shown in Fig. 4b). When c = 0 and ∆1∆2 < 0, the EPs make the interference patterns divergent and form a singular region (white area in Fig. 4c). It is therefore hard to study the influence of each parameter on the features of the LZSM interferometry.
Next, we propose the concept of the projective Hilbert space (see Appendix C for details) and examine the effect of the nonreciprocity k. Through equations (8), without losing generality, the quantum state |ψr⟩ can be defined as

|ψr⟩ = e^{µ(t)+iν(t)} |ψ̃⟩ = e^{µ(t)+iν(t)} (ã, b̃)ᵀ,    (15)

with the normalization relation ⟨ψ̃|ψ̃⟩ = 1 (µ and ν are two real parameters), where |ψ̃⟩ = (ã, b̃)ᵀ is the quantum state in the projective Hilbert space. Then, we draw the normalized interference patterns |ã|² = |α1|²/(|α1|² + |β1|²) (see Fig. 5). Compared with |α1|², the regulation of the parameters on the |ã|² interference pattern already emerges when c = 0. This is because the LZSM interference is determined by the Stückelberg phases: the phases accumulated during the evolution are retained in the quantum states |ψ̃⟩ in the projective Hilbert space by removing the divergence caused by the non-Hermitian term e^{µ(t)}.

FIG. 5. The interference patterns of the projective state population probability |ã|² at time t = 50/∆ as a function of ϵ0/∆ and ω/∆ for the initial state (α1(t0), β1(t0)) = (0, 1), (α2(t0), β2(t0)) = (0, 1) in the anti-phase tunneling case ∆1∆2 < 0 with (a) c/∆ = 0, k = 2, (b) c/∆ = 1.05, k = 2, (c) c/∆ = 0, k = 1/2, and (d) c/∆ = 1.05, k = 1/2.

In Fig. 5, when c = 0, the populations of the corresponding projective quantum states in the singular region are limited to values set by the nonreciprocity k.
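In code, passing from the raw right-state amplitudes (α1, β1) to the projective population of Eq. (15) is a one-line normalization; the minimal sketch below (with arbitrary sample amplitudes) illustrates why this quantity stays finite even where the raw non-Hermitian amplitudes diverge in the singular region.

import numpy as np

def projective_population(alpha1: complex, beta1: complex) -> float:
    """|a~|^2 = |alpha1|^2 / (|alpha1|^2 + |beta1|^2), the normalized population
    shown in Fig. 5; it is insensitive to the overall (growing) norm of the state."""
    norm2 = abs(alpha1)**2 + abs(beta1)**2
    return abs(alpha1)**2 / norm2

# example: amplitudes amplified by the non-unitary evolution by a huge common factor
print(projective_population(3e6 + 4e6j, 1e7))   # 0.2, independent of the overall scale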
To further reveal the influence of the parameter k, we next start from the simplest case with c = 0 and then analyze the case with c ≠ 0. We then demonstrate the validity and accuracy of the proposed method and the numerical results in the weak-coupling limit.

A. The effect of nonreciprocity and the projective quantum states in the linear non-Hermitian system

Assuming c = 0, the Hamiltonian of the system (7) becomes

ĤmF = [[γ/2, ∆1/2], [∆2/2, −γ/2]],    (16)

where ∆1∆2 < 0. Considering the quantum state |ψr⟩ = e^{µ+iν}|ψ̃⟩ = e^{µ+iν}(ã, b̃)ᵀ and Eq. (8), one can get

µ̇ = −(i/2)⟨ψ̃|Ĥ − Ĥ†|ψ̃⟩,  ν̇ = −(1/2)⟨ψ̃|Ĥ + Ĥ†|ψ̃⟩ + i⟨ψ̃|∂t ψ̃⟩.    (17)

Substituting Eq. (17) and the definition |ψ̃⟩ = (ã, b̃)ᵀ ≡ (sin(θ/2) e^{iϕ}, cos(θ/2))ᵀ into equation (8), we have (see Appendix C for details)
θ̇ = −∆1 sinϕ cos²(θ/2) − ∆2 sinϕ sin²(θ/2),
ϕ̇ = −γ − (∆1/2) cot(θ/2) cosϕ + (∆2/2) tan(θ/2) cosϕ,
µ̇ = [(∆2 − ∆1)/4] sinθ sinϕ,
ν̇ = γ/2 − (∆2/2) tan(θ/2) cosϕ.    (18)

FIG. 6. The dynamical evolution trajectory of the projective right quantum state of the system (16) on the Bloch sphere for different non-Hermiticities: (a) k = 2 and (b) k = 1/2. The numerical simulation parameters are A/∆ = 2.5 and ϵ0 = 0, and the initial condition is (ã, b̃) = (0, 1). The z-axis coordinates of the points of the red dashed circle on the Bloch sphere are z0 = cos θ0 = (1 − k²)/(1 + k²).

For ϵ0 = 0, when the time is long enough, the projective state always ends up on a certain circle (θ̇ = 0) of the Bloch sphere (see Fig. 6). By Eq. (18), we can obtain the equation of the circle on which the projective quantum state finally lies. Surprisingly, we find the correlation between k and θ0 = lim_{t→∞} θ to be

k² = tan²(θ0/2).    (19)

Therefore, in combination with Fig. 5, we can explain why |ã|² is limited to a certain value in the singular region.
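The claim of Eqs. (18) and (19) can be probed numerically. The sketch below integrates the linear (c = 0) anti-phase dynamics generated by Eq. (16) and checks whether the late-time projective state approaches the circle with tan²(θ0/2) = k². Two ingredients are assumptions of this sketch rather than statements of this section: the bias is driven as γ(t) = ϵ0 + A sin(ωt) (consistent with the phase Φ(t) of Eq. (26)), and the nonreciprocal couplings are split as ∆1 = k∆, ∆2 = −∆/k, which is the split compatible with Eq. (19).

import numpy as np
from scipy.integrate import solve_ivp

Delta, k, A, w, eps0 = 1.0, 2.0, 2.5, 1.0, 0.0     # Fig. 6-style values
D1, D2 = k * Delta, -Delta / k                      # assumed nonreciprocal split, D1*D2 < 0

def rhs(t, y):
    # i d/dt (alpha1, beta1) = H_mF (alpha1, beta1), Eq. (16), with the assumed driven bias
    a, b = y[0] + 1j * y[1], y[2] + 1j * y[3]
    g = eps0 + A * np.sin(w * t)
    da = -1j * (0.5 * g * a + 0.5 * D1 * b)
    db = -1j * (0.5 * D2 * a - 0.5 * g * b)
    return [da.real, da.imag, db.real, db.imag]

state, t = np.array([0.0, 0.0, 1.0, 0.0]), 0.0      # (alpha1, beta1) = (0, 1)
for _ in range(40):                                  # integrate in chunks of 5/Delta
    sol = solve_ivp(rhs, (t, t + 5.0), state, max_step=0.01, rtol=1e-9)
    state, t = sol.y[:, -1], t + 5.0
    state = state / np.linalg.norm(state)            # renormalize; harmless in the projective space

a, b = state[0] + 1j * state[1], state[2] + 1j * state[3]
print("late-time |alpha1|^2/|beta1|^2 = tan^2(theta/2):", abs(a)**2 / abs(b)**2)
print("k^2 (value predicted by Eq. (19))              :", k**2)

Because the c = 0 equation is linear, rescaling the state between chunks changes nothing in the projective space, which is exactly the point of working with |ψ̃⟩.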
B. The influence of interaction and non-Hermiticity on the population in the projective Hilbert space

In the nonlinear Hermitian system [33], i.e., ∆ = ∆1 = ∆2, when ϵ0 = 0 and A ≪ ω, the population of the system exhibits either the self-trapping phase transition or the Josephson oscillation, depending on the nonlinearity, and the boundary line is c/∆ = 2 [67, 69]. Based on this, we next study the nonlinear non-Hermitian LZSM interference patterns for ϵ0 = 0 with different nonlinearities c, non-Hermitian parameters k, and mean amplitudes ∆ [see Fig. 7 and Fig. 9].

Firstly, we consider the in-phase tunneling case ∆1∆2 > 0, where the symmetry of the system is unbroken. For the Hermitian Hamiltonian Ĥh, near the boundary of the two different oscillations, the maximum population of the self-trapping region is 0.5, and the amplitude then gradually decreases as c/∆ increases. The populations of the state for the non-Hermitian Hamiltonian Ĥ with ∆1 ≠ ∆2 differ from those for the Hermitian Hamiltonian Ĥh only by a weight of k, as shown in Eq. (13). Therefore, we get |ã|² = k²|b̃|² at the boundary, similar to the Hermitian case, and the boundary line c/∆ = 2 (red dashed line in Fig. 7) between the two regions (self-trapping and Josephson oscillation) is the same as in the Hermitian system.

FIG. 7. The nonlinear non-Hermitian LZSM interference patterns with different nonlinearities for (a) k = 2 and (b) k = 1/2, for weak driving at ϵ0 = 0 in the in-phase tunneling case ∆1∆2 > 0: the projective population |ã|² as a function of ∆/ω and c/ω for A/ω = 0.05, from the initial time t0 = 0 to t = 2π/ω. The red dashed-dotted line (with slope 1/2) denotes the boundary between the different oscillations.
The amplitude of the population of the projective quantum state is determined by the nonreciprocity k, as shown in Fig. 7(a) and (b). Then, we consider the dynamical evolution of the projective quantum state near the boundary. From Eqs. (8) and (15), one can obtain

θ̇r = Im A sinθr − ∆1 sinϕr cos²(θr/2) − ∆2 sinϕr sin²(θr/2),
ϕ̇r = −γ − Re A − (∆1/2) cot(θr/2) cosϕr + (∆2/2) tan(θr/2) cosϕr,
µ̇r = −(Im A/2) cosθr + [(∆2 − ∆1)/4] sinθr sinϕr,
ν̇r = γ/2 + Re A/2 − (∆2/2) tan(θr/2) cosϕr,    (20)

with the right quantum state |ψr⟩ = (α1, β1)ᵀ = e^{µr+iνr}(ã, b̃)ᵀ = e^{µr+iνr}(sin(θr/2) e^{iϕr}, cos(θr/2))ᵀ, and

θ̇l = −Im A sinθl − ∆2 sinϕl cos²(θl/2) − ∆1 sinϕl sin²(θl/2),
ϕ̇l = −γ − Re A − (∆2/2) cot(θl/2) cosϕl + (∆1/2) tan(θl/2) cosϕl,
µ̇l = (Im A/2) cosθl + [(∆1 − ∆2)/4] sinθl sinϕl,
ν̇l = γ/2 + Re A/2 − (∆1/2) tan(θl/2) cosϕl,    (21)

with the left quantum state |ψl⟩ = (α2, β2)ᵀ = e^{µl+iνl}(ãl, b̃l)ᵀ = e^{µl+iνl}(sin(θl/2) e^{iϕl}, cos(θl/2))ᵀ, where A ≡ c(α1α2* − β1β2*). By numerical simulation, we give the dynamical evolution of the projective right state on the Bloch sphere near the boundary c/∆ = 2 in Fig. 8.

FIG. 8. The dynamics of the projective states represented by their trajectories in spherical coordinates (θ, ϕ) on the Bloch sphere in the in-phase tunneling case ∆1∆2 > 0 with different strengths of nonlinearity and nonreciprocity: (a) c/∆ = 1.9, k = 2, (b) c/∆ = 2, k = 2, (c) c/∆ = 2.1, k = 2, (d) c/∆ = 1.9,
k = 1/2, (e) c/∆ = 2, k = 1/2, and (f) c/∆ = 2.1, k = 1/2. The other parameters are chosen as A/ω = 0.05 and ϵ0 = 3, and the initial state is (ã, b̃) = (0, 1). The z-axis coordinates of the red dashed circle on the Bloch sphere are z0 = cos θ0 = (1 − k²)/(1 + k²), and the z-axis coordinates of the green dashed circle are z′0 = 0.

When c/∆ > 2, the projective states can only evolve on the part of the Bloch sphere above the red dashed circle, as shown in Fig. 8(b), (c), (e) and (f). The red circle represents the projective states whose relative population difference |b̃|² − |ã|² equals (1 − k²)/(k² + 1) = cos θ0. By |ã|² = k²|b̃|² and the normalization condition, cos θ0 = |b̃|² − |ã|² labels the boundary between the self-trapping region and the Josephson oscillation region. As discussed before, the nonreciprocity k does not affect the constructive and destructive interference, but it does affect the relative population difference of the state. When k is larger, the relative population difference at the boundary between the two regions is smaller [see the red circles in Fig. 8(a-c) and (d-f)] and the projective population probability |ã|² is smaller [see Fig. 7(a) and (b)]. For the anti-phase tunneling case ∆1∆2 < 0, because of the existence of EPs in the linear case c = 0, the projective quantum states reach the self-trapping region no matter how weak the nonlinearity is.
The trajectories of the projective states on the Bloch sphere always stay above the red dashed circles, which label the boundaries between the self-trapping region and the Josephson oscillation region, as shown in Fig. 9. The maximum population of the projective quantum state is still affected by the nonreciprocity k, as shown in Eq. (19) and Fig. 10(a-d).

FIG. 9. The nonlinear non-Hermitian LZSM interference patterns with different nonlinearities for (a) k = 2 and (b) k = 1/2, for weak driving at ϵ0 = 0 in the anti-phase tunneling case ∆1∆2 < 0: the projective population |ã|² as a function of ∆/ω and c/ω for A/ω = 0.05, from the initial time t0 = 0 to t = 2π/ω.

FIG. 10. The dynamics of the projective states represented by their trajectories in spherical coordinates (θ, ϕ) on the Bloch sphere in the anti-phase tunneling case ∆1∆2 < 0 with different strengths of nonlinearity and nonreciprocity: (a) c/∆ = 0.1, k = 2, (b) c/∆ = 1, k = 2, (c) c/∆ = 0.1, k = 1/2, and (d) c/∆ = 1, k = 1/2. The other parameters are chosen as A/ω = 0.05 and ϵ0 = 3, and the initial state is (ã, b̃) = (0, 1). The z-axis coordinates of the red dashed circle on the Bloch sphere are z0 = cos θ0 = (1 − k²)/(1 + k²), and the z-axis coordinates of the green dashed circle are z′0 = 0.
Comparing Fig. 10(b) and (d) with Fig. 10(a) and (c), it is easy to find that the stronger the nonlinearity, the stronger the self-trapping effect.

C. Weak-coupling limit of the projective quantum states: ∆ ≪ ω

When the weak-coupling limit is considered, transitions between the adiabatic energy levels are difficult in the near-degenerate region. In this approximation, we only assume |ãg(t)|² ∼ |ãg(t0)|² and |b̃g(t)|² ∼ |b̃g(t0)|², where g = r, l. Assuming that the initial condition is (ãg(t0), b̃g(t0)) = (0, 1), the quantum state can always be written in the following form:

|ψg(t)⟩ = e^{µg(t)+iνg(t)} (0, 1)ᵀ,    (22)

where g = r, l.
FIG. 11. Time evolution of the projective population probability |ã|² for weak coupling in the in-phase tunneling case ∆1∆2 > 0, with different nonlinearities: (a) c/ω = 0, k = 2, (b) c/ω = 0.5, k = 2, (c) c/ω = 1, k = 2, (d) c/ω = 0, k = 1/2, (e) c/ω = 0.5, k = 1/2, and (f) c/ω = 1, k = 1/2. Exact and approximate solutions are compared in each panel. The other parameters are A/ω = 10.5, ∆/ω = 0.05, and ϵ0/ω = 3.

By Eqs. (8), (17) and (22), we get µ̇r(t) + iν̇r(t) + µ̇l(t) − iν̇l(t) = 0. This means

β1(t)β2*(t) − α1(t)α2*(t) ∼ β1(t0)β2*(t0) − α1(t0)α2*(t0).    (23)

Based on this approximation, we can transform the dynamics of the system from the Schrödinger picture to the Dirac picture by introducing the gauge transformation φr(t) = U(t)ϕr(t), with U(t) = (ϵ0/2)t − A cos(ωt)/(2ω) + (c/2)(β1β2* − α1α2*) and ϕr(t) = [α̃1, β̃1]ᵀ [33]. In the new basis, the nonlinear dynamic Eqs. (8) become (assuming ∆1 > 0)
i ∂t (α̃1, β̃1)ᵀ = [[0, kΩ], [(−1)^j Ω*/k, 0]] (α̃1, β̃1)ᵀ,    (24)

and

i ∂t (α̃2, β̃2)ᵀ = [[0, (−1)^j Ω*/k], [kΩ, 0]] (α̃2, β̃2)ᵀ,    (25)

with

Ω = (∆/2) e^{iΦ(t)},  Φ(t) = ϵ0 t − A cos(ωt)/ω + c t,    (26)

and j = 1, 2 corresponding to the anti-phase case ∆2 < 0 and the in-phase case ∆2 > 0, respectively. Ω denotes the field-induced Rabi frequency, and Φ(t) is the relative phase of the two diabatic energy levels. The nonreciprocity k in front of Ω corresponds to the weight of the populations of the projective quantum state. Thus, we can understand the fact that the maximum value of the populations in the self-trapping regions changes with k² in the in-phase case ∆1∆2 > 0.
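To make the weak-coupling picture concrete, the sketch below integrates the linearized right-state equation (24) with Ω = (∆/2)e^{iΦ(t)} and Φ(t) from Eq. (26), then reports the projective population |α̃1|²/(|α̃1|² + |β̃1|²). The in-phase case (j = 2) and Fig. 11-style parameters are used; reading the projective population directly from the gauge-transformed amplitudes is an assumption of this sketch.

import numpy as np
from scipy.integrate import solve_ivp

w, A, Delta, eps0, k = 1.0, 10.5, 0.05, 3.0, 2.0   # Fig. 11-style parameters (w = 1)
c, j = 0.5, 2                                       # j = 2: in-phase case Delta2 > 0

def Phi(t):
    return eps0 * t - A * np.cos(w * t) / w + c * t  # relative phase of the diabatic levels, Eq. (26)

def rhs(t, y):
    a, b = y[0] + 1j * y[1], y[2] + 1j * y[3]
    Om = 0.5 * Delta * np.exp(1j * Phi(t))
    da = -1j * k * Om * b                            # Eq. (24), first row
    db = -1j * (-1)**j * np.conj(Om) / k * a         # Eq. (24), second row
    return [da.real, da.imag, db.real, db.imag]

y0 = [0.0, 0.0, 1.0, 0.0]                            # initial state (a~, b~) = (0, 1)
T = 20 * 2 * np.pi / w                               # twenty driving periods
sol = solve_ivp(rhs, (0.0, T), y0, max_step=0.01, rtol=1e-8)

a, b = sol.y[0] + 1j * sol.y[1], sol.y[2] + 1j * sol.y[3]
pop = np.abs(a)**2 / (np.abs(a)**2 + np.abs(b)**2)
print("max projective population over 20 periods:", pop.max())

Sweeping c over values such as 0, 0.5 and 1 is the cheapest way to see the constructive/destructive alternation discussed next.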
FIG. 12. Time evolution of the projective quantum state population probability |ã|² for weak coupling in the anti-phase tunneling case ∆1∆2 < 0, with different nonlinearities: (a) c/ω = 0, k = 2, (b) c/ω = 0.5, k = 2, (c) c/ω = 0, k = 1/2, and (d) c/ω = 0.5, k = 1/2. Exact and approximate solutions are compared in each panel. The other parameters are A/ω = 10.5, ∆/ω = 0.05, and ϵ0/ω = 3.

In a full cycle, Φ(t) can be approximately written as

Φ(t) ≃ ∫_{t1}^{t3} (ϵ0 + c − nω) dt = (2π/ω)(ϵ0 + c − nω),    (27)

with n = 0, ±1, ±2, .... When Φm = 2mπ, i.e., c + ϵ0 ≃ (n + m)ω = dω (m, d = 0, ±1, ±2, ...), the patterns are constructive, while the patterns are destructive when Φm = (2m + 1/2)π. By solving the nonlinear equation (8) and the linear equation (24), we obtain the exact solution and the approximate solution, respectively. In Fig. 11, we show multi-period LZSM interference fringes with different characteristics in the in-phase tunneling case ∆2 > 0: when c = 0, 1, i.e., Φm = 2mπ, the patterns are constructive, and when c = 0.5, 1.5, i.e., Φm = (2m + 1/2)π, the patterns are destructive. In all nonlinear cases, the two solutions are consistent.
In Fig. 12, we show the anti-phase tunneling case ∆2 < 0. As in the in-phase tunneling case, the constructive and destructive interference depend only on m, and the nonreciprocity k only affects the maximal value of the projective population probability |ã|².

IV. CONCLUSION

In this work, we have studied non-Hermitian nonlinear LZSM interferometry in which the non-Hermiticity comes from the nonreciprocal tunnelings between the bosons. Using the mean-field approximation and the projective Hilbert space, we have studied the effect of nonreciprocity and nonlinearity on the energy spectrum, the dynamics, and the formation of the interference fringes. The results show that different types of reciprocity correspond to different types of symmetries of the system. For the in-phase tunneling case ∆1∆2 > 0, the system can be transformed into a Hermitian one by a nonunitary transformation; it has the same energy spectrum and the same boundary between the Josephson region and the self-trapping region as the Hermitian system. This is not a necessary result for the anti-phase case ∆1∆2 < 0: the EPs can only exist in its linear case c = 0, and the eigenvalues of one energy state become complex in the nonlinear case. There is only a self-trapping region in this case, since the evolution of the projective states always stays above the boundary when the nonlinearity exists. For the LZSM interferometry, the strength of the nonreciprocity k is found to play an essential role in the population of the projective state and to determine the maximal values and strengths of the interference patterns in the projective space.
Finally, under the weak-coupling approximation, we found that the types and strengths of the nonreciprocity do not affect the conditions for destructive and constructive interference; these depend only on the strength of the nonlinearity. Our result provides a possible way to study the parameters of a non-Hermitian nonlinear two-level system and its related external fields by LZSM interferometry.

ACKNOWLEDGMENTS

We thank S. C. Li and F. Q. Dou for their helpful discussions. This work is supported by the National Natural Science Foundation of China (NSFC) (Grants Nos. 11875103, 12147206, 11725417, 12088101, 12047548, and U1930403) and the Science Challenge Project (Grant No. TZ2018005).

Appendix A: Semi-classical Hamiltonian

In the non-Hermitian system, let Ĥ be a non-Hermitian Hamiltonian with a complete biorthonormal eigenbasis {|ψr_n⟩, |ψl_n⟩}; the orthogonal normalization of the quantum states is

⟨ψr_n|ψl_m⟩ = δnm.    (A1)

Similarly, for system (1), in the mean-field approximation, the coherent states should be written as

|Ψr_sc⟩ = (1/√N!) (α1 â† + β1 b̂†)^N |∅⟩,    (A2)
|Ψl_sc⟩ = (1/√N!) (α2 â† + β2 b̂†)^N |∅⟩.    (A3)

According to the normalization condition ⟨Ψl_sc|Ψr_sc⟩ = 1,

α1α2* + β1β2* = 1.    (A4)

Then, applying the Hamiltonian of system (1) to the right quantum state |Ψr_sc⟩, one can obtain
Ĥ|Ψr_sc⟩ = [(γ/2)(â†â − b̂†b̂) + (∆2/2) â†b̂ + (∆1/2) â b̂† − (c/4N)(â†â − b̂†b̂)²] (1/√N!) Σ_{r=0}^{N} C^r_N (α1 â†)^{N−r} (β1 b̂†)^r |∅⟩.    (A5)

When calculating the expectation value of an observable, the quantum states of the system are normalized. So in system (1), the expectation value of Ĥ0 should be written as

⟨Ψl_sc|Ĥ0|Ψr_sc⟩ = (Nγ/2) Σ_{r=0}^{N} [(N−1)!/((N−r−1)! r!)] (α1α2*)^{N−r−1}(β1β2*)^r α1α2*
  − (Nγ/2) Σ_{r=0}^{N} [(N−1)!/((N−r)! (r−1)!)] (α1α2*)^{N−r}(β1β2*)^{r−1} β1β2*
  + N [(∆2/2) Σ_{r=0}^{N} C^r_{N−1} (N−r) (α1α2*)^{N−r−1}(β1β2*)^r α2*β1 + (∆1/2) Σ_{r=0}^{N} C^{r−1}_{N−1} r (α1α2*)^{N−r}(β1β2*)^{r−1} α1β2*]
  − (cN/4)(β1β2* − α1α2*)²
  = (Nγ/2)(α1α2* − β1β2*) + (N∆2/2)(α2*β1) + (N∆1/2)(α1β2*) − (cN/4)(β1β2* − α1α2*)².    (A6)

The expectation value per particle is

ĤM = ⟨Ψl_sc|Ĥ0|Ψr_sc⟩/N = −(c/4)(β1β2* − α1α2*)² + (∆2/2)(α2*β1) + (∆1/2)(α1β2*) + (γ/2)(α1α2* − β1β2*).    (A7)

Appendix B: Derivation of the energy level equation

In the non-Hermitian system, the Hamiltonian Ĥ has a complete biorthonormal eigenbasis {|φr_n⟩, |φl_n⟩} satisfying

Ĥ|φr_n⟩ = En|φr_n⟩,    (B1)
Ĥ†|φl_n⟩ = En*|φl_n⟩,    (B2)
⟨φl_m|φr_n⟩ = δmn  (n = 1, 2, ...).    (B3)

By equation (B1), we can naturally conclude that the adiabatic basis of the system (7) satisfies

Fα1 + (i∆/2) β1 = Eα1,  (i∆/2) α1 − Fβ1 = Eβ1,    (B4)
F*α2 − (i∆/2) β2 = E*α2,  −(i∆/2) α2 − F*β2 = E*β2,    (B5)
α1α2* + β1β2* = 1,    (B6)

where F ≡ γ/2 + (c/2)(β1β2* − α1α2*). To derive non-trivial solutions of Eqs. (B1) and (B2), we must ensure that |Ĥ − E Î| = 0 and |Ĥ† − E* Î| = 0 (Î is an identity matrix).
Namely,

E² − F² + ∆²/4 = 0,    (B7)
E*² − F*² + ∆²/4 = 0.    (B8)

By (B4) and the complex conjugate of Eq. (B5), we have

α1α2*/(β1β2*) = −4(E + F)²/∆².    (B9)

By the normalization (B6) and Eq. (B7), this becomes

β1β2* = (E − F)/(2E).    (B10)

Therefore,

F ≡ γ/2 + (c/2)(β1β2* − α1α2*) = γ/2 − cF/(2E).    (B11)

Substituting Eq. (B11) into Eq. (B7), we finally have

E⁴ + cE³ + (1/4)(c² − γ² + ∆²)E² + (c∆²/4)E + ∆²c²/16 = 0.    (B12)
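Eq. (B12) is a quartic in E with real coefficients, so the four eigenenergies can be obtained directly from its coefficient list; the sketch below does this with numpy.roots and counts the real roots, using arbitrary illustrative values of c, γ and ∆ rather than values from the paper.

import numpy as np

def eigenenergies(c, gamma, Delta):
    """Roots of Eq. (B12):
    E^4 + c E^3 + (1/4)(c^2 - gamma^2 + Delta^2) E^2 + (c Delta^2/4) E + Delta^2 c^2/16 = 0."""
    coeffs = [1.0,
              c,
              0.25 * (c**2 - gamma**2 + Delta**2),
              0.25 * c * Delta**2,
              Delta**2 * c**2 / 16.0]
    return np.roots(coeffs)

# illustrative scan over the bias
for gamma in (0.0, 0.5, 1.0, 2.0):
    E = eigenenergies(c=0.8, gamma=gamma, Delta=1.0)
    n_real = int(np.sum(np.abs(E.imag) < 1e-9))
    print(f"gamma={gamma:3.1f}: E = {np.round(E, 4)}  ({n_real} real)")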
Appendix C: The projective space for a non-Hermitian quantum system

Consider the following Schrödinger equation,

i d/dt |ψ(t)⟩ = Ĥ|ψ(t)⟩,    (C1)

where Ĥ is generally a non-Hermitian Hamiltonian. Let us define |ψ(t)⟩ = e^{µ+iν}|ψ̃(t)⟩ with the normalization relation ⟨ψ̃(t)|ψ̃(t)⟩ = 1 (µ and ν are two real parameters). From Eq. (C1) and its Hermitian conjugate, one can get

µ̇ = −(i/2)⟨ψ̃|Ĥ − Ĥ†|ψ̃⟩,    (C2)

and

ν̇ = −(1/2)⟨ψ̃|Ĥ + Ĥ†|ψ̃⟩ + i⟨ψ̃|∂t ψ̃⟩.    (C3)

One has to keep in mind that the above deduction is somewhat different from what would be obtained by using the adjoint equation of (C1). In quantum theory with Hermitian Hamiltonians, |ψ(t)⟩ and |ψ̃(t)⟩ are equivalent, since the time evolution is unitary (probability preserving) and they differ only by a global phase. Under this equivalence, |ψ̃(t)⟩ can be employed as a vector in the so-called projective Hilbert space of the system. However, for a system with a non-Hermitian Hamiltonian, the time evolution is not unitary. Hence, though the state vectors only differ in norm, they may describe different system states. Nevertheless, we can still formally set up the projective Hilbert space for a non-Hermitian system by using |ψ̃(t)⟩ as a state on it. Based on the above definition, from Eqs. (C2) and (C3), we can see that the norm increment and the global phase acquired by the state during its time evolution can be obtained solely from the trace in the projective space; the latter is the same as for Hermitian systems. The global phase and its relation with the projective Hilbert space play a significant role in the geometric (topological) properties of Hermitian quantum systems. Therefore, it may be interesting to study the geometric properties of a non-Hermitian system from such a point of view.

In order to show such discussions clearly, we employ a two-level system describing the physics of two coupled sites with gain and loss, whose Hermitian counterpart also plays a role in illustrating the geometric properties of quantum systems. The time evolution of such a two-level system is described by a 2 × 2 matrix Hamiltonian through the following equation:

i d/dt (a, b)ᵀ = [[H11, H12], [H21, H22]] (a, b)ᵀ.    (C4)

Then, following the definition |ψ(t)⟩ = e^{µ+iν}|ψ̃(t)⟩, one can get

[d(iµ − ν)/dt] ã + i dã/dt = H11 ã + H12 b̃,    (C5)
[d(iµ − ν)/dt] b̃ + i db̃/dt = H21 ã + H22 b̃.    (C6)

Combining these with their complex conjugates, and using |ã|² + |b̃|² = 1, we can easily verify equations (C2) and (C3). For convenience and without losing generality, we then construct the vector in the projective space for a state |ψ(t)⟩ = (a, b)ᵀ as |ψ̃(t)⟩ = (ã e^{iϕ}, b̃)ᵀ, with ã = |a|/√(|a|² + |b|²), b̃ = |b|/√(|a|² + |b|²), and ϕ = arg(a) − arg(b). Denoting by z = |b̃|² − |ã|² the relative population difference of the two levels, the state can then be mapped to a sphere, the so-called Bloch sphere, with the coordinates (ϕ, z).
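The decomposition used in this appendix is easy to implement: given raw two-level amplitudes (a, b), split off the norm factor e^µ and a global phase, and keep the Bloch coordinates (ϕ, z) of the projective state. The sketch below does this; fixing the global phase ν by the phase of b is a convention chosen here, not one prescribed by the text.

import numpy as np

def project(a: complex, b: complex):
    """Decompose (a, b) = e^(mu + i nu) (a~ e^{i phi}, b~) with a~, b~ >= 0 and
    return mu, nu together with the Bloch-sphere coordinates (phi, z)."""
    norm = np.sqrt(abs(a)**2 + abs(b)**2)
    mu = np.log(norm)                  # norm gained or lost in the non-unitary evolution
    nu = np.angle(b)                   # global phase, fixed here by the phase of b (a convention)
    a_t, b_t = abs(a) / norm, abs(b) / norm
    phi = np.angle(a) - np.angle(b)    # relative phase of the two levels
    z = b_t**2 - a_t**2                # relative population difference, the z coordinate
    return mu, nu, phi, z

# example: a state whose norm has grown by a factor e^2 during the evolution
print(project(np.e**2 * 0.6j, np.e**2 * 0.8))   # mu ~ 2, nu ~ 0, phi ~ pi/2, z = 0.28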
From Eq. (C3), we can obtain the evolution of the total phase,
\[ \frac{d\beta}{dt} = -\frac{1}{2}\langle\tilde\psi|\hat{H} + \hat{H}^\dagger|\tilde\psi\rangle + \frac{1}{2}(1 - z)\frac{d\phi}{dt}. \tag{C7} \]
This equation is the same as the one obtained for Hermitian systems by Aharonov and Anandan, except that in the dynamical part the Hermitian Hamiltonian \hat{H} is replaced by (\hat{H} + \hat{H}^\dagger)/2. The second term on the right-hand side is known as the geometric part. One can easily show that, if the trace of the evolution is closed in the projective space, the geometric phase equals half the solid angle subtended by the closed path on the Bloch sphere, which is just the so-called AA phase, i.e., the geometric phase of a cyclic state.
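Explicitly (our restatement of the standard Aharonov-Anandan result in the coordinates defined above), integrating the geometric part of Eq. (C7) over one closed trace C on the Bloch sphere gives
\[ \beta_g = \frac{1}{2}\oint_C (1 - z)\, d\phi = \frac{\Omega_C}{2}, \]
where \Omega_C is the solid angle enclosed by C, measured from the z = 1 pole.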
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Savage, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' D 62, 085001 (2000).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [52] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Bender, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Boettcher, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Jones, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Meisinger, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Simsek, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 291, 197 (2001).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [53] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Bender, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Brody, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Jones, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 93, 251601 (2004);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Phys.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' D 70, 025001 (2004).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [54] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Mostafazadeh, Int.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Mod.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 21, 2553 (2006).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [55] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Bender, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Branchina, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Messina, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' D 85, 085001 (2012).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [56] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Bender and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Milton, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' D 57, 3595 (1998).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [57] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Dorey, C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Dunning, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Tateo, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 34, L391 (2001).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [58] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Miao, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' M¨uller-Kirsten, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Park, Journal of High Energy Physics 2003, 038 (2003).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [59] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Jin and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Song, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 80, 052107 (2009);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 85, 012111 (2012);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 44, 375304 (2011).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [60] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Minganti, A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Miranowicz, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Chhajlany, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Nori, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 100, 062131 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [61] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Longstaff and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='-M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Graefe, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 100, 052119 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [62] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Graefe, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Korsch, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Niederle, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 101, 150408 (2008).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [63] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Longstaff and E.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='-M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Graefe, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 100, 052119 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [64] X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Shen, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Wang, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Li, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Wu, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 100, 062514 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [65] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Wang, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Sun, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Liu, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 106, 063708 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [66] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Ib´a˜nez and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Muga, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 89, 033403 (2014).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [67] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='-D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Liu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Fang, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Zheng, Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Theor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' 68, 439 (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [68] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Cirac, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Lewenstein, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Mølmer, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Zoller, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' A 57, 1208 (1998).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' [69] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Wang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Fu, and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Yi, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQf1fkT/content/2301.00734v1.pdf'} +page_content=' Rev.' 
diff --git a/1tE4T4oBgHgl3EQfaQwc/vector_store/index.pkl b/1tE4T4oBgHgl3EQfaQwc/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..ef6a54e606b032abeec6ad85fb2ad1628f9ece43
--- /dev/null
+++ b/1tE4T4oBgHgl3EQfaQwc/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ac013d166c2dc030d8f7bd9bf82d690a5bf6753853593a747b5b1f1374c952e
+size 177623
diff --git a/29E1T4oBgHgl3EQfAQI2/content/tmp_files/2301.02836v1.pdf.txt b/29E1T4oBgHgl3EQfAQI2/content/tmp_files/2301.02836v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1ac4a05a627778d5739ca3e427f17c9002d595b6
--- /dev/null
+++ b/29E1T4oBgHgl3EQfAQI2/content/tmp_files/2301.02836v1.pdf.txt
@@ -0,0 +1,1606 @@
Dynamic Local Feature Aggregation for Learning on Point Clouds
Zihao Li (a), Pan Gao (a), Hui Yuan (b), Ran Wei (c)
(a) Nanjing University of Aeronautics and Astronautics, Nanjing, China
(b) Shandong University, Jinan, China
(c) Science and Technology on Electro-optic Control Laboratory, Luoyang, China

Abstract
Existing point cloud learning methods aggregate features from neighbouring points by constructing graphs in the spatial domain, so each point is updated from spatially fixed neighbours throughout the layers. In this paper, we propose a dynamic feature aggregation (DFA) method that transfers information by constructing local graphs in the feature domain, without spatial constraints. By finding k-nearest neighbors in the feature domain, we perform relative position encoding and semantic feature encoding to explore latent position and feature-similarity information, respectively, so that rich local features can be learned. At the same time, we also learn low-dimensional global features from the original point cloud to enhance the feature representation. Between DFA layers, we dynamically update the constructed local graph structure, so that richer information can be learned, which greatly improves adaptability and efficiency. We demonstrate the superiority of our method with extensive experiments on point cloud classification and segmentation tasks. Implementation code is available: https://github.com/jiamang/DFA.

Keywords: dynamic feature aggregation, point cloud, relative position encoding, semantic feature encoding, classification, segmentation

1. Introduction
The collection of points that describes the spatial distribution and surface characteristics of a target is called point cloud data, which represents the 3D target in an unstructured form. A point cloud acquired by combining laser scanning and photogrammetry mainly contains three-dimensional position coordinates (X, Y, Z), laser reflection intensity and color information (R, G, B). Common point cloud data formats include the RGB-D dual-modality format and the point cloud spatial format. RGB-D dual-modality data records the color and depth information of the surface of the target object.
Email addresses: pride_19@163.com (Zihao Li), Pan.Gao@nuaa.edu.cn (Pan Gao), huiyuan@sdu.edu.cn (Hui Yuan), 115946873@qq.com (Ran Wei)

The point cloud spatial format records the three-dimensional coordinates of points sampled on the object surface, reflecting its spatial contour.

Learning features from point clouds often requires a great deal of dedicated processing. Traditional methods capture the geometric characteristics of point clouds with hand-crafted features [1]. With the breakthrough of convolutional neural networks and deep learning, significantly better performance has been achieved in various point cloud processing tasks. However, standard deep neural networks require regular input, whereas point cloud data is irregular and unordered, and operations such as translation and rotation do not change the nature of the underlying shape. Some methods therefore convert the points into a regular 3D grid and feed the grid into the network, but this causes additional memory consumption and information loss. Pointnet, proposed by [2], set a precedent for learning directly on the raw point cloud by applying a multi-layer perceptron to each point.

However, since Pointnet [2] cannot capture contextual information, many recent studies have introduced different modules to learn richer local structures, which can be divided into the following categories: 1) feature update based on constructing graph structures [3][4][5][6][7]; 2) feature pooling based on neighboring points [8][9][10][11][12]; 3) convolution based on a series of kernels [13][14][15][16][17][18][19]; 4) learning based on attention mechanisms [20][21][22][23]. These methods achieve good results in classification and segmentation, but constructing local feature learners and computing attention weights incur expensive computation and memory costs. In addition, the feature extractors proposed by some methods are not efficient enough and leave considerable room for improvement.

The goal of this paper is to design an efficient local feature extractor without adding much complexity, and then use the learned features to represent objects, improving point cloud classification and segmentation. We therefore propose a dynamic feature aggregation (DFA) module, which extracts latent features by finding k-nearest neighbors in the feature domain, encoding position information and semantic feature information simultaneously, and concatenating the two parts. In the classification and segmentation tasks, this module is stacked to extract rich local features. Using a network structure similar to Pointnet [2], we extract low-dimensional global features from the initial point cloud and concatenate them with the local features extracted by multiple DFA layers. Finally, high-dimensional global features are obtained for classification and segmentation. For segmentation, we concatenate the high-dimensional global features again with the local features and apply MLP operations to predict the category of each point.

In general, we design an efficient local feature extractor that utilizes multi-level and multi-source features to effectively characterize objects.
Multi-level features are reflected in that, by stacking several DFA layers, we gradually obtain deeper contextual features. Multi-source features are reflected in that we combine several types of information, namely position information, feature differences, the features themselves, and low-dimensional global features, to perform deeper and higher-dimensional feature learning. To verify its efficiency, we conduct experiments on the ModelNet40 [24], ShapeNet [25] and S3DIS [26] datasets, together with extensive visualizations and ablation studies. Our main contributions are summarized as follows:
• We propose a new operation, DFA, which finds k-nearest neighbors in the feature domain to construct a local graph structure for feature aggregation each time. The graph is dynamically updated between DFA layers, which makes the operation more adaptive.
• In each DFA layer, we learn rich latent position and feature-difference information through the proposed relative position encoding and semantic feature encoding, respectively. To the best of our knowledge, simultaneously aggregating relative position and feature information in the feature domain has not been studied before.
• We make full use of the learned local features and low-dimensional global features for point cloud classification and segmentation tasks, and obtain outstanding quantitative and qualitative results on benchmark datasets.

2. Related work

2.1. Voxel-based Network.
Converting point cloud data into a regular voxel structure preserves and expresses the spatial distribution. In 2016, Qi et al. [27] improved voxel CNNs and proposed two different voxel CNN architectures. Afterwards, Tchapmi et al. [28] proposed SEGCloud, which combines a voxel-based 3D fully convolutional neural network with a point-based conditional random field. Wang et al. [29] proposed O-CNN, whose core idea is to represent 3D shapes with octrees and apply CNN operations only on the sparse octants occupied by the shape boundary. In order to effectively encode the point distribution within voxels, Meng et al. [30] proposed the voxel variational autoencoder network VV-Net, where the point distribution in each voxel is captured by an autoencoder. In 2020, Shao et al. [31] proposed a spatial-hashing-based data structure and designed hash2col and col2hash so that CNN operations such as convolution and pooling can be parallelized.

2.2. View-based Network.
Usually, the point cloud is first projected onto 2D images, and then a 2D CNN is used to extract image features. Due to the limitations of existing deep learning networks, this kind of method can only recognize the point cloud model from specific angles. In 2017, Lawin et al. [32] generated images with different pitch angles and translation distances by controlling equidistant angles. SnapNet-R, proposed by Guerry et al. [33], can use 2D images and 3D spatial structure information at the same time. MVPNet, proposed by Jaritz et al. [34] in 2019, aggregates 2D image features into 3D. The relation network proposed by Yang et al. [35] comprehensively considers the relationships between different views and regions, and also uses an attention mechanism to generate scores reflecting the relative discriminative ability of the views.

2.3. Point-based Network.
Direct processing of point clouds retains the complete original information. Qi et al.
[2] proposed the Pointnet network, the first deep neural network that directly processes unordered point clouds. Since it does not consider local features, they [36] further proposed Pointnet++ to extract local features at multiple levels. Later, Atzmon et al. [37] proposed the point convolutional neural network, which uses extension and restriction operators to define convolution. In response to the inflexibility of fixed grids, Thomas et al. [19] proposed KPConv, whose kernel points live in Euclidean space and which is very effective in classifying point clouds with varying densities. In addition, PointConv [15] and PointCNN [38] use 3D convolution kernels to extract features instead of shared MLPs. PointConv [15] can be extended to deconvolution to achieve better segmentation results, and PointCNN [38] introduced the X-transform to rearrange the points into a potentially regular order before applying convolution to extract local features.

Graph-based Methods. These methods construct a local or global graph structure to pass messages and learn features. In general, graph structures in the spatial domain rely on finding k-nearest neighbors for message passing, while graph structures in the spectral domain are realized with methods such as Laplacian spectral decomposition and Chebyshev polynomial approximation. KCNet [4] defines a point-set kernel as a set of learnable 3D points and aggregates features on a nearest-neighbor graph based on geometric relationships and local high-dimensional features measured by kernel correlation. Wang et al. [5] proposed DGCNN to learn edge embeddings by constructing local graphs. Unlike DGCNN [5], 3D-GCN [39] defines learnable kernels with a graph max-pooling mechanism and introduces shift and scale invariance into deep learning networks. DeepGCNs [40] uses residual connections and dilated convolutions to train deeper graph structures, and experiments confirm the positive effect of depth.

Transformer-based Methods. Since the great success of transformers in NLP, much recent work has introduced attention mechanisms into point cloud tasks. PCT [41] adopts an architecture similar to Pointnet [2], using neighbor-information embedding and an improved offset transformer for feature learning, and achieves good results in classification and segmentation tasks.

Figure 1: Illustration of feature extraction by a DFA layer. Color closeness represents adjacency in the feature domain rather than spatial proximity. Rich information is obtained through relative position encoding and semantic feature encoding. The edge features of each adjacent point are obtained by a shared MLP, and the features of the central point are finally updated by max pooling. The subscripts j1, ..., j5 index the feature-domain neighbors of the center x_i.
Similarly, some works build on the Pointnet++ [36] architecture, such as PT [42] and BL-Net [43]. PT [42], proposed by Zhao et al., adds a transformer layer to extract features after each downsampling or upsampling step; the transformer is modified to measure the difference between corresponding channels of two feature vectors (Q and K). BL-Net [43] designs a position-feedback module to perform feature-guided point shifting. In addition, Yan et al. [44] also used an attention mechanism and proposed PointASNL, which can effectively process point clouds with noise.

3. Methodology
Extracting and utilizing effective features is crucial in point cloud tasks. We construct a local graph structure through dynamic updating, so information can diffuse non-locally across the whole point cloud. Based on the graph structure, we explore both latent position and semantic features at different layers. Further, we make full use of global features and of local features containing detailed information. We describe the operation, called Dynamic Feature Aggregation (DFA), in Section 3.1, and introduce the network structure in Section 3.2.

3.1. Dynamic Feature Aggregation
We define the input point cloud as X = {x_i | i = 1, 2, ..., N} ∈ R^{N×3}, with corresponding features F = {f_i | i = 1, 2, ..., N} ∈ R^{N×D}. Here x_i denotes the three-dimensional coordinates (x, y, z) of the i-th point. As the input point cloud only contains three-dimensional coordinates, the geometric coordinates can also be regarded as its initial features.

When extracting features at each layer, a local graph is dynamically constructed, defined as G = (V, E), where V = {1, 2, ..., n} and E ⊆ V × V are the vertices and edges, respectively. We build the local graph by finding k-nearest neighbors in the feature domain, including self-loops. Suppose x_i is the center point of the graph structure; then N(i) = {j : (i, j) ∈ E} is its set of neighbors in the feature domain. Specifically, feature similarity is measured by the Euclidean distance between features, and the k points with the smallest distances are selected as the nearest neighbors. We then retrieve the 3D coordinates of each nearest neighbor. Given the input three-dimensional coordinates and D-dimensional features, our purpose is to learn and output M-dimensional features for the same number of points through the DFA layer.

Figure 2: DFA-based network architectures for classification and segmentation tasks. ⊕ stands for the concatenation operation. The spatial transformation computes a 3 × 3 matrix that aligns the input point cloud to a canonical space. By concatenating local features and low-dimensional global features and applying an MLP and max pooling, a 1D global descriptor is generated for the classification task. For part segmentation, we generate 1024-dimensional global features, fuse the category feature vector, and then concatenate the detailed local features again to output the category score of each point through an MLP.
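For concreteness, the following is a minimal sketch (in PyTorch, not the authors' released implementation) of the dynamic graph construction described above: neighbors are searched in whatever embedding is supplied, here the current feature map, and a small helper gathers the neighbor tensors used later. Function and variable names are illustrative.

```python
import torch

def knn_graph(emb: torch.Tensor, k: int) -> torch.Tensor:
    """emb: (B, N, C) point embeddings; returns (B, N, k) neighbor indices."""
    dist = torch.cdist(emb, emb)                 # pairwise Euclidean distances
    # smallest distances = most similar points; the point itself (distance 0)
    # is kept, matching the self-loops mentioned above
    return dist.topk(k, dim=-1, largest=False).indices

def gather_neighbors(x: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    """x: (B, N, C), idx: (B, N, k) -> (B, N, k, C) neighbor tensor."""
    batch = torch.arange(x.shape[0], device=x.device).view(-1, 1, 1)
    return x[batch, idx]                         # advanced indexing over points
```

Because the search runs on the feature map of the current layer, the returned neighborhoods change from layer to layer, which is exactly the dynamic update exploited below.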
Because the local graph connects each center point with its k nearest neighbors, we define the feature of an edge as e_ij = h_Θ(f_i, f_j), where h_Θ : R^D × R^D → R^M is a nonlinear function with a set of learnable parameters Θ. Finally, we aggregate the edge features of the k nearest neighbors along each channel to obtain the updated feature of each center point f_i entering the DFA layer, defined as follows:

f'_i = Π_{j ∈ N(i)} h_Θ(f_i, f_j)    (1)

Semantic Feature Encoding. We find the k-nearest neighbors in the feature domain, which means that points sharing the same class have a high probability of being connected. We then concatenate the feature of the center point with the feature differences to its neighbors as the semantic feature information. This not only keeps the original feature of the center point, but also propagates information from the neighbors through the feature differences. We define the encoding as follows:

h_{f_j} = f_i ⊕ (f_i − f_j),  j ∈ N(i)    (2)

Here, ⊕ is the concatenation operation. We compute and concatenate the feature differences and the center feature along each dimension, aiming to encode semantically similar features and explore their latent information.

Relative Position Encoding. We first store the original 3-dimensional position coordinates, and for each center point we look up the latent position information of its nearest neighbors in the feature domain. We encode the relative position information of the neighboring points as follows:

h_{x_j} = MLP(x_i ⊕ x_j ⊕ (x_i − x_j) ⊕ ||x_i − x_j||),  j ∈ N(i)    (3)

where x_i and x_j are the original three-dimensional coordinates, (x_i − x_j) is the relative coordinate between the center point and its feature-domain neighbor, ⊕ is the concatenation operation, and || · || computes the Euclidean distance between the neighbor and the center point. Unlike nearest neighbors restricted by geometric distance in space, we can discover more latent location information in the feature domain, from points that have similar semantic features but a larger geometric distance.

Given the position and semantic embeddings, we concatenate the two parts and extract the edge features through an MLP operation:

h_ij = MLP(h_{x_j} ⊕ h_{f_j}),  j ∈ N(i)    (4)

Finally, we need to decide how to aggregate the features of the neighboring edges, i.e., Π in (1). We consider three options. The first is to max-pool the edge features learned from all nearest neighbors to obtain the feature of the center point. The second is to sum all edge features. The third is to apply a softmax over the neighbors to obtain weight coefficients W_ij, multiply each edge feature by its weight, i.e., W_ij × h_ij, and sum the weighted edge features to update the center point. The experimental results show that max pooling performs best, so we choose max pooling to aggregate all edge features.
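Putting Eqs. (1)-(4) together, the sketch below shows one possible DFA layer built on the kNN helpers above. It is a simplified illustration rather than the released code: batch normalization is omitted, the shared MLPs are single Linear + LeakyReLU blocks, the attention-weighted sum is applied channel-wise, and the names (DFALayer, pos_mlp, edge_mlp) are ours.

```python
import torch
import torch.nn as nn

class DFALayer(nn.Module):
    """One dynamic feature aggregation layer (sketch of Eqs. (1)-(4))."""
    def __init__(self, in_dim, pos_dim, out_dim, k=20, aggr="max"):
        super().__init__()
        self.k, self.aggr = k, aggr
        # relative position encoding, Eq. (3): 3 + 3 + 3 + 1 = 10 input channels
        self.pos_mlp = nn.Sequential(nn.Linear(10, pos_dim), nn.LeakyReLU(0.2))
        # edge MLP of Eq. (4): position encoding plus [f_i, f_i - f_j]
        self.edge_mlp = nn.Sequential(nn.Linear(pos_dim + 2 * in_dim, out_dim),
                                      nn.LeakyReLU(0.2))

    def forward(self, xyz, feat):                    # (B, N, 3), (B, N, D)
        idx = knn_graph(feat, self.k)                # dynamic feature-domain graph
        nbr_xyz, nbr_feat = gather_neighbors(xyz, idx), gather_neighbors(feat, idx)
        ctr_xyz = xyz.unsqueeze(2).expand_as(nbr_xyz)
        ctr_feat = feat.unsqueeze(2).expand_as(nbr_feat)
        rel = ctr_xyz - nbr_xyz
        h_x = self.pos_mlp(torch.cat([ctr_xyz, nbr_xyz, rel,
                                      rel.norm(dim=-1, keepdim=True)], dim=-1))
        h_f = torch.cat([ctr_feat, ctr_feat - nbr_feat], dim=-1)
        h = self.edge_mlp(torch.cat([h_x, h_f], dim=-1))   # (B, N, k, M), Eq. (4)
        if self.aggr == "max":                        # the option used in the paper
            return h.max(dim=2).values
        if self.aggr == "sum":
            return h.sum(dim=2)
        w = torch.softmax(h, dim=2)                   # attention-weighted sum
        return (w * h).sum(dim=2)
```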
3.2. Network Architecture
We use the proposed DFA layer to design two network architectures, for point cloud classification and segmentation, as shown in Fig. 2. We first feed the initial point cloud into a spatial transformation network similar to that of Pointnet [2]. By learning from the position information of the point cloud itself, it predicts a rotation matrix that is most conducive to classification or segmentation. The point cloud is multiplied by this matrix and then fed into our stacked DFA layers to extract features.

Local and Global Information Aggregation. Focusing only on the global features obtained by pooling over all points ignores the local interaction between points, while focusing only on local features of surrounding points is one-sided. Therefore, we combine local and global features to comprehensively learn the information contained in the point cloud, so that it can be better used in classification and segmentation tasks. Our local features are learned by several DFA layers, and the lower-dimensional global features are obtained, similarly to Pointnet [2], with a shared MLP and max pooling. Our ablation experiments also confirm that integrating the global features is beneficial. We set the several local features and the low-dimensional global features to the same dimension (64) because we consider them equally important, which is also confirmed in practice.

Classification Network. Our classification network is shown in the upper part of Fig. 2. The point cloud output by the spatial transformation network passes through four DFA layers in sequence to extract local features, the input of each layer being the output of the previous one. We concatenate these four local features and the global features extracted from the initial point cloud, and then map them to a higher dimension through MLP operations. Finally, global features are obtained by max pooling for classification prediction.

Segmentation Network. Our segmentation network is similar to the classification network, as shown in the lower part of Fig. 2. We pass the transformed point cloud through three DFA layers in sequence. The three local features and the low-dimensional global features are likewise concatenated to obtain 1024-dimensional global features through an MLP and max pooling. For part segmentation we also add a category feature vector (64); for semantic segmentation we do not. Finally, we use a shared MLP to resize the features and predict the semantic label of each point.

Dynamic Graph Update. Depending on the spatial interactions in the point cloud, locally adjacent parts can form subsets. However, updating the graph with spatial neighbors sometimes leads to failures of feature aggregation. For example, in a point cloud of an airplane, the wing and the fuselage are adjacent in space, yet mutually updating their features is useless. We therefore find k-nearest neighbors in the feature domain, which means the connected points tend to constitute meaningful parts. Each time, we search neighbors in the feature domain to reconstruct the local graph structure. Our graph is thus dynamically updated, and we can explore more latent location information, which cannot be achieved by k-nearest neighbor search in space.
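As a concrete reading of Fig. 2, the sketch below assembles the classification branch from the DFALayer sketch above: four 64-dimensional DFA layers, a Pointnet-like 64-dimensional global branch (the spatial transform is omitted here for brevity), concatenation to 320 channels, an MLP to 1024 channels, max pooling, and a small classifier. The classifier widths (512, 256) follow the figure; everything else, including the class name, is an illustrative assumption.

```python
import torch
import torch.nn as nn

class DFAClassifier(nn.Module):
    def __init__(self, num_classes=40, k=20):
        super().__init__()
        self.dfa = nn.ModuleList([DFALayer(3, 64, 64, k)] +
                                 [DFALayer(64, 64, 64, k) for _ in range(3)])
        self.global_mlp = nn.Sequential(nn.Linear(3, 64), nn.LeakyReLU(0.2))
        self.fuse = nn.Sequential(nn.Linear(5 * 64, 1024), nn.LeakyReLU(0.2))
        self.head = nn.Sequential(nn.Linear(1024, 512), nn.LeakyReLU(0.2),
                                  nn.Dropout(0.5),
                                  nn.Linear(512, 256), nn.LeakyReLU(0.2),
                                  nn.Dropout(0.5),
                                  nn.Linear(256, num_classes))

    def forward(self, xyz):                      # xyz: (B, N, 3), already aligned
        feats, f = [], xyz
        for layer in self.dfa:                   # four stacked DFA layers
            f = layer(xyz, f)
            feats.append(f)                      # each (B, N, 64)
        feats.append(self.global_mlp(xyz))       # low-dimensional global branch
        x = self.fuse(torch.cat(feats, dim=-1))  # (B, N, 1024)
        x = x.max(dim=1).values                  # global max pooling
        return self.head(x)                      # class scores
```

The segmentation variant differs as described above: three DFA layers, the per-point local features concatenated back onto the repeated 1024-dimensional descriptor, and (for part segmentation) a 64-dimensional category vector; a sketch of that head appears in Sec. 4.2 below.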
4. Experiments
In this section, we evaluate our models using DFA on point cloud classification and segmentation tasks.

Methods             | Input       | Points | mAcc | OA
Pointnet [2]        | xyz         | 1k     | 86.0 | 89.2
Pointnet++ [36]     | xyz         | 1k     | -    | 90.7
Pointnet++ [36]     | xyz, normal | 5k     | -    | 91.9
SpiderCNN [45]      | xyz, normal | 1k     | -    | 92.4
PointWeb [12]       | xyz, normal | 1k     | 89.4 | 92.3
PointCNN [38]       | xyz         | 1k     | 88.1 | 92.2
DGCNN [5]           | xyz         | 1k     | 90.2 | 92.2
Point2Sequence [46] | xyz         | 1k     | 90.4 | 92.6
FPConv [47]         | xyz, normal | 1k     | -    | 92.5
PointConv [15]      | xyz, normal | 1k     | -    | 92.5
KPConv [19]         | xyz         | 6k     | -    | 92.9
Point2Node [48]     | xyz         | 1k     | -    | 93.0
PointASNL [44]      | xyz         | 1k     | -    | 92.9
PointASNL [44]      | xyz, normal | 1k     | -    | 93.2
PCT [41]            | xyz         | 1k     | -    | 93.2
SO-Net [8]          | xyz, normal | 5k     | 90.8 | 93.4
BL-Net [43]         | xyz         | 1k     | -    | 93.5
AG-conv [49]        | xyz         | 1k     | 90.7 | 93.4
PointStack [50]     | xyz         | 1k     | 89.6 | 93.3
Ours (1024 points)  | xyz         | 1k     | 91.1 | 93.6
Ours (2048 points)  | xyz         | 2k     | 91.6 | 94.0
Table 1: Classification results on ModelNet40.

4.1. Classification
Data. We evaluate our point cloud classification model on the ModelNet40 [24] dataset. This dataset contains 12311 mesh CAD models from 40 categories, of which 9843 models are used for training and 2468 for testing. We follow the experimental setting of [2] and uniformly sample 1024 or 2048 points for each model, using only the 3D coordinates (x, y, z) as input. Data augmentation includes shifting, scaling and perturbing the points.

Network Configuration. The network architecture is shown in Fig. 2. At each layer we recompute the graph based on feature similarity. For 1024 points we set the number of nearest neighbors k to 20; to maintain the same density, we set k to 40 for 2048 points. We use four DFA layers to extract local geometric features and a Pointnet-like structure to extract low-dimensional global features, both implemented with fully connected layers (64). We concatenate the extracted multi-level features to obtain 64 × 5 = 320-dimensional features, from which the global feature is obtained, and two fully connected layers then transform the global feature for classification. All layers use LeakyReLU and batch normalization. We use the SGD optimizer with momentum 0.9, an initial learning rate of 0.1, and a dropout rate of 0.5 in the fully connected layers to prevent overfitting. The batch size is set to 32. We implement the network in PyTorch and train it on two RTX 2080Ti GPUs.

Results. Table 1 shows the results of the classification task; the evaluation metrics on this dataset are mean class accuracy (mAcc) and overall accuracy (OA). Our network is trained with only 3D coordinates, which contain less raw information, yet achieves the best results on this dataset. The result with 2048 sampled points is better than with 1024 points, indicating that when more original information is available, our network can learn richer features and perform better.

Figure 3: Visual comparison of four methods for part segmentation (columns from left to right: PointNet, DGCNN, AG-conv, ours, ground truth).
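The training setup quoted in Sec. 4.1 above can be summarized by the following minimal sketch; the data loader is hypothetical, DFAClassifier is the sketch from Sec. 3.2, and no learning-rate schedule is shown because the text only states the initial rate.

```python
import torch
from torch import nn, optim

model = DFAClassifier(num_classes=40, k=20)                  # 1024-point setting
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
criterion = nn.CrossEntropyLoss()

def train_one_epoch(loader):                                  # batches of 32 clouds
    model.train()
    for xyz, label in loader:                                 # xyz: (32, 1024, 3)
        optimizer.zero_grad()
        loss = criterion(model(xyz), label)
        loss.backward()
        optimizer.step()
```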
Methods          | mIoU | air. | bag  | cap  | car  | cha. | ear. | gui. | kni. | lam. | lap. | mot. | mug  | pis. | roc. | ska. | tab.
NUM              |      | 2690 | 76   | 55   | 898  | 3758 | 69   | 787  | 392  | 1547 | 451  | 202  | 184  | 283  | 66   | 152  | 5271
Pointnet [2]     | 83.7 | 83.4 | 78.7 | 82.5 | 74.9 | 89.6 | 73.0 | 91.5 | 85.9 | 80.8 | 95.3 | 65.2 | 93.0 | 81.2 | 57.9 | 72.8 | 80.6
Pointnet++ [36]  | 85.1 | 82.4 | 79.0 | 87.7 | 77.3 | 90.8 | 71.8 | 91.0 | 85.9 | 83.7 | 95.3 | 71.6 | 94.1 | 81.3 | 58.7 | 76.4 | 82.6
SO-Net [8]       | 84.9 | 82.8 | 77.8 | 88.0 | 77.3 | 90.6 | 73.5 | 90.7 | 83.9 | 82.8 | 94.8 | 69.1 | 94.2 | 80.9 | 53.1 | 72.9 | 83.0
RGCNN [51]       | 84.3 | 80.2 | 82.8 | 92.6 | 75.3 | 89.2 | 73.7 | 91.3 | 88.4 | 83.3 | 96.0 | 63.9 | 95.7 | 60.9 | 44.6 | 72.9 | 80.4
DGCNN [5]        | 85.2 | 84.0 | 83.4 | 86.7 | 77.8 | 90.6 | 74.7 | 91.2 | 87.5 | 82.8 | 95.7 | 66.3 | 94.9 | 81.1 | 63.5 | 74.5 | 82.6
PCNN [37]        | 85.1 | 82.4 | 80.1 | 85.5 | 79.5 | 90.8 | 73.2 | 91.3 | 86.0 | 85.0 | 96.7 | 73.2 | 94.8 | 83.3 | 51.0 | 75.0 | 81.8
3D-GCN [39]      | 85.1 | 83.1 | 84.0 | 86.6 | 77.5 | 90.3 | 74.1 | 90.9 | 86.4 | 83.8 | 95.3 | 65.2 | 93.0 | 81.2 | 59.6 | 75.7 | 82.8
PointASNL [44]   | 86.1 | 84.1 | 84.7 | 87.9 | 79.7 | 92.2 | 73.7 | 91.0 | 87.2 | 84.2 | 95.8 | 74.4 | 95.2 | 81.0 | 63.0 | 76.3 | 83.2
PRA-Net [52]     | 86.3 | 84.4 | 86.8 | 89.5 | 78.4 | 91.4 | 76.4 | 91.5 | 88.2 | 85.3 | 95.7 | 73.4 | 94.8 | 82.1 | 62.3 | 75.5 | 84.0
Ours             | 86.0 | 85.4 | 80.0 | 85.8 | 80.6 | 92.4 | 74.1 | 92.0 | 87.4 | 84.6 | 95.6 | 73.5 | 94.4 | 83.9 | 59.0 | 74.0 | 83.2
Table 2: Part segmentation results on the ShapeNet dataset. The metric is mIoU (%); NUM is the number of shapes per category.

4.2. Part Segmentation
Data. We test our model on the ShapeNet dataset [25] for point cloud part segmentation. This dataset contains 16881 shapes in 16 categories, of which 14006 are used for training and 2874 for testing. There are 50 part labels in total, and each model includes 2-6 parts. We follow the experimental setup of [2]: 2048 points are sampled from each shape, and the input consists only of the 3D coordinates.

Network Configuration. We use three DFA layers to extract features and obtain 1024-dimensional global features in the same way as for classification. Following [5], we also add a one-hot vector representing the category type to each point. We concatenate the global features and the category vector into new global features of 1024 + 64 = 1088 dimensions. We then re-concatenate the previous three local features and convert the result into per-point features through three fully connected layers (512, 256, 128) for segmentation. Our training parameters are the same as in the classification task, except that the batch size is changed to 16.
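A sketch of this part-segmentation head, reusing the DFALayer sketch from Sec. 3.1, is given below. The widths (three 64-dim DFA layers, a 1024-dim global descriptor, a 64-dim category embedding, per-point layers of 512/256/128) follow the description above; the class and member names are illustrative, the neighborhood size is an assumption, and normalization layers are again omitted.

```python
import torch
import torch.nn as nn

class DFAPartSeg(nn.Module):
    def __init__(self, num_parts=50, num_categories=16, k=40):
        super().__init__()
        self.dfa = nn.ModuleList([DFALayer(3, 64, 64, k),
                                  DFALayer(64, 64, 64, k),
                                  DFALayer(64, 64, 64, k)])
        self.global_mlp = nn.Sequential(nn.Linear(3, 64), nn.LeakyReLU(0.2))
        self.fuse = nn.Sequential(nn.Linear(4 * 64, 1024), nn.LeakyReLU(0.2))
        self.cat_embed = nn.Linear(num_categories, 64)      # one-hot -> 64 dims
        self.head = nn.Sequential(
            nn.Linear(1024 + 64 + 3 * 64, 512), nn.LeakyReLU(0.2),
            nn.Linear(512, 256), nn.LeakyReLU(0.2),
            nn.Linear(256, 128), nn.LeakyReLU(0.2),
            nn.Linear(128, num_parts))

    def forward(self, xyz, category_onehot):    # (B, N, 3), (B, num_categories)
        locals_, f = [], xyz
        for layer in self.dfa:                  # three stacked DFA layers
            f = layer(xyz, f)
            locals_.append(f)
        g = torch.cat(locals_ + [self.global_mlp(xyz)], dim=-1)
        g = self.fuse(g).max(dim=1).values                       # (B, 1024)
        g = torch.cat([g, self.cat_embed(category_onehot)], -1)  # (B, 1088)
        g = g.unsqueeze(1).expand(-1, xyz.shape[1], -1)          # repeat per point
        x = torch.cat([g] + locals_, dim=-1)                     # (B, N, 1280)
        return self.head(x)                                      # per-point scores
```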
From +the experimental results in table 2, it can be seen that +8 + +Methods +mAcc +mIou +ceiling +floor +wall +beam +column +windows +door +chair +table +bookcase +sofa +board +clutter +Pointnet[2] +48.98 +41.09 +88.80 +97.33 +69.80 +0.05 +3.92 +46.26 +10.76 +58.93 +52.61 +5.85 +40.28 +26.38 +33.22 +SEGCloud[28] +57.35 +48.92 +90.06 +96.05 +69.86 +0.00 +18.37 +38.35 +23.12 +70.40 +75.89 +40.88 +58.42 +12.96 +41.60 +PointCNN[38] +63.86 +57.26 +92.31 +98.24 +79.41 +0.00 +17.60 +22.77 +62.09 +74.39 +80.59 +31.67 +66.67 +62.05 +56.74 +PointWeb[12] +66.64 +60.28 +91.95 +98.48 +79.39 +0.00 +21.11 +59.72 +34.81 +76.33 +88.27 +46.89 +69.30 +64.91 +52.46 +SPG[53] +66.50 +58.04 +89.35 +96.87 +78.12 +0.00 +42.81 +48.93 +61.58 +84.66 +75.41 +69.84 +52.60 +2.10 +52.22 +PCNN[37] +67.01 +58.27 +92.26 +96.20 +75.89 +0.27 +5.98 +69.49 +63.45 +66.87 +65.63 +47.28 +68.91 +59.10 +46.22 +PCT[41] +67.65 +61.33 +92.54 +98.42 +80.63 +0.00 +19.35 +61.64 +48.00 +76.58 +85.20 +46.22 +67.71 +67.93 +52.29 +Ours +67.96 +62.18 +92.68 +98.50 +79.12 +0.05 +36.72 +67.45 +65.18 +75.36 +86.77 +71.52 +52.59 +65.02 +57.12 +Table 3: Semantic segmentation results on S3DIS dataset. +Pointnet +DGCNN + ours + ground truth +Figure 4: Visual comparison of three methods for semantic segmentation. +in some categories with a small number of samples, the +segmentation effect is not good due to too few training +samples. But overall, our method has better performance, +especially with the highest mIou in many categories such +as airplane, car, chair, etc. This benefits from these cat- +egories having sufficient samples so that our network can +learn rich features for part segmentation tasks. +Fig. +3 +shows the visual differences between us and several other +mainstream methods on some categories. These methods +are roughly capable of distinguishing different parts of an +object, and the difference lies in the identification of de- +tails. Looking closely at the tail section of the airplane, +the fence section below the chair, the top of the car, and +the connection between different parts in the guitar, our +method is closer to the ground truth. +4.3. Semantic Segmentation +Data. We further test our model on the Stanford Large- +Scale 3D Indoor Spaces Dataset (S3DIS) dataset [26] for +point cloud semantic scene segmentation. +This dataset +is taken from 271 rooms in 6 different areas in 3 differ- +ent buildings. +The point cloud data of each scene has +9-dimensional data including xyz three-dimensional coor- +dinates, RGB color information, and the normalized posi- +tion coordinates x′y′z′ of each point relative to the room +where it is located. At the same time, each point cloud in +the scene is assigned a semantic label from 13 categories +9 + +(such as ceiling, table, etc.). +Network Configuration. Our semantic segmentation +network configuration is the same as for part segmentation, +the only difference is that no feature vector is added. +Results. We divide each room into 1m × 1m blocks and +sample 4096 points in each block during training. And we +use area5 as the test set. For evaluation metrics, we use +mean class accuracy (mAcc) and mean class intersection +(mIou). The experimental results are shown in the table +3, and the visualization is shown in the fig. 4. +4.4. Ablation Studies +In this subsection, we explore the effect of using different +choices in the network. The effectiveness of our module +and parameter selection is demonstrated in these ablation +experiments. +Number of neighbors. 
The k value of constructing +the local graph structure has a great influence on the ex- +tracted features. Therefore, it is very important to choose +an appropriate value of k in the experiment. We conducted +4 sets of experiments to explore the impact of choosing dif- +ferent k values on the classification results of 2048 points, +which is also shown in the table 4. When the value of k is +10 and 20, the neighborhood of each center point is small +and cannot fully interact with the neighbor points. Appro- +priately increasing the value of k can also have room for +improvement, which also shows that DFA can effectively +use the features of neighborhood points to learn local fea- +tures. By further increasing the value of k, it can be found +that increasing the value of k all the time will not increase +the accuracy of the model. Because when the value of k +is too large, there will be many noise points that are very +different from the center point features, which is useless or +even burdensome for updating the center point features, +and will also increase the amount of parameters and net- +work training time. Choosing a neighbor k value of 40 can +obtain the best average class accuracy and overall accu- +racy. +k +mAcc +OA +10 +90.2 +93.3 +20 +90.8 +93.7 +40 +91.6 +94.0 +60 +91.5 +93.3 +Table 4: Number of neighbors(k) +Selection of aggregate functions Π. It can be seen in +many previous works[2][36][41] that some symmetric pool- +ing functions such as max/sum/mean are often used to +overcome the disordered characteristics of point clouds. +In our DFA layer, we also need to aggregate edge features +to update features for each center point. We experimented +with different aggregation functions such as max, sum, or +sum with attention weights which first do softmax on k- +nearest neighbors dimension to get the attention weights +and then multiply and accumulate them accordingly. The +max function is to select the largest feature of points in +the local neighborhood. The sum function is to add the +features of all points in the neighborhood, and the mean +function is to divide by the k value after the sum func- +tion. Table 5 shows the results of our selection of differ- +ent aggregation functions on a classification experiment of +2048 points. Although the maximum pooling function will +lose the non-largest part of the features, it will retain the +largest part of the most significant features, and the ex- +perimental results show that it is the most effective. We +finally choose the best-performing max function to aggre- +gate the edge features. +Π +mAcc +OA +max +91.6 +94.0 +sum +90.5 +93.4 +mean +90.3 +93.2 +attention sum +91.0 +93.5 +Table 5: Choice of different aggregation functions Π +10 + +Feature or space domains. Further, we explore in +which domain is better to compute k-nearest neighbors, +i.e., the feature domain or the spatial domain. If we choose +to do k-nearest neighbors in the spatial domain, it means +that the graph structure is fixed each time. On the one +hand, the relative position coding will be the same, on +the other hand, it is very limited to exchange information +with fixed neighbor points each time. If we choose to do +k-nearest neighbors on the feature domain, it means that +the local graph structure is dynamically updated, and the +neighbors of the graph are different each time but the fea- +tures are similar. We can make better use of DFA layers to +discover efficient features. 
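To make the contrast between the two domains concrete, the following PyTorch-style sketch (PyTorch being the implementation framework stated earlier; the helper names and the simplified edge-feature construction are our own illustrative assumptions, not the DFA layer verbatim, and the relative position encoding discussed next is omitted) rebuilds the k-nearest-neighbor graph from the current features at every layer and aggregates the resulting edge features with the max function selected in Table 5.

import torch

def knn_feature(x, k):
    # x: (B, N, C) per-point features; neighbors are searched in feature space, not in xyz
    dist = torch.cdist(x, x)                       # (B, N, N) pairwise feature distances
    return dist.topk(k, largest=False).indices     # (B, N, k); the point itself is included

def feature_domain_aggregation(x, k=40):
    # simplified stand-in for one feature-domain aggregation layer
    B, N, C = x.shape
    idx = knn_feature(x, k)                                  # graph rebuilt from the current features
    idx_exp = idx.unsqueeze(-1).expand(B, N, k, C)
    x_exp = x.unsqueeze(1).expand(B, N, N, C)                # all candidate neighbors per center
    nbrs = torch.gather(x_exp, 2, idx_exp)                   # (B, N, k, C) neighbor features
    center = x.unsqueeze(2).expand(B, N, k, C)
    edge = torch.cat([center, nbrs - center], dim=-1)        # center feature plus relative feature
    return edge.max(dim=2).values                            # max over the k neighbors -> (B, N, 2C)

Passing the raw xyz coordinates to knn_feature instead of the learned features reproduces the fixed spatial graph used as the baseline in Table 6.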
We choose to compare the ex- +perimental results in the classification task of 2048 points. +As can be seen from the table 6, our way of exchanging +information with neighbor updates in the feature domain +is better. Because the k-nearest neighbors obtained in this +way are more homogeneous. Especially for part segmen- +tation, spatially adjacent points are not necessarily of the +same class, so it is useless or even redundant to exchange +information with these points. +spatial or feature domain +mAcc +OA +feature +91.6 +94.0 +spatial +91.1 +93.4 +Table 6: Comparison of k-nearest neighbors in feature domain and +space. +Relative position information. By computing the +k-nearest neighbors of the feature domain, we are able to +discover latent-location feature information that is not lim- +ited by space. In this way, the relative position encoding +in each DFA layer is different because the neighborhood +points are changing. This allows us to connect points that +may not be in close spatial locations. So we explore its ef- +fectiveness by whether incorporating this part in the clas- +sification task of 2048 points. +The experimental results +in table 7 show that adding location information encoding +can have better performance. This also shows that the po- +tential position information obtained by relative position +encoding is crucial. +Position information +mAcc +OA +w +91.6 +94.0 +w/o +90.1 +93.3 +Table 7: Whether to add position information +Low-dimensional +global +features. +Inspired by +Pointnet [2] and Pointnet++ [36], it is not advisable to +only focus on global features or local features, so we adopt +a fusion of both. Global features can provide overall direc- +tion control, while local features can provide more detailed +information. We believe that these are equally important +in network learning, so after extracting local features of +different depths, we concatenate these local features and +low-dimensional global features together through MLP op- +erations to upgrade to high-dimensional for subsequent +tasks. To this end, we compare the classification results +of 2048 points with or without adding low-dimensional +global features. The table 8 confirms the effectiveness of +our way of concatenating the learned local features and +low-dimensional global features. +Low-global features +mAcc +OA +w +91.6 +94.0 +w/o +89.9 +93.1 +Table 8: Whether to add low-dimensional global features +4.5. Model Complexity +We use the stat package in pytorch to output some quan- +titative results of the network model. It includes the total +number of parameters of the network model, the number +of floating-point operations required for network opera- +tion, and the memory occupied by node inference. The +experimental results are all tested based on the classifi- +cation model on 1024 points. At the same time, we test +11 + +other mainstream methods for comparison as shown in the +following table 9. +It can be seen that our model has fewer parameters and +does not occupy a large amount of memory, indicating that +our network structure is lightweight, and not complicated +and easy to implement. In networks based on graph meth- +ods, the amount of computation is generally too large due +to the need to interact with neighbors to update features. +Compared with other methods of this type, our floating- +point operations are also much less. At the same time the +performance is still the best. 
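Independently of any particular profiling package, the parameter totals compared below can be cross-checked directly; this is a generic sketch, with model standing in for whichever network is being measured.

import torch

def count_trainable_parameters(model: torch.nn.Module) -> int:
    # total number of trainable parameters, comparable to the params column of Table 9
    return sum(p.numel() for p in model.parameters() if p.requires_grad)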
+Method +Pparams +Flops +Memory +OA +Pointnet[2] +0.7M +0.5M +10.5M +89.2 +Pointnet++[36] +2.2M +3.1M +231.5M +91.9 +DGCNN[5] +1.8M +1.89G +123.0M +92.9 +AG-conv[49] +1.9M +2.9G +202.0M +93.4 +PCT[41] +2.9M +2.32G +187.6M +93.2 +ours +1.1M +2.17G +154.5M +93.6 +Table 9: Quantitative evaluation of classification on ModelNet40. +5. Conclusion +This paper proposes a new operation for point cloud +learning and also demonstrates its performance in differ- +ent tasks. The main contribution of our method is to ag- +gregate local feature in the feature domain, explore the la- +tent relative position information and semantic feature in- +formation, and learn to obtain higher-dimensional features +by concatenating local features and low-dimensional global +features. Our DFA can dynamically construct graphs that +are not spatially correlated and exchange information be- +tween points with semantically similar features. +Exper- +imental results show that our network outperforms the +state-of-the-art on several public datasets. Further, our +DFA module is simple and efficient, and can be seamlessly +integrated into other network models. +References +[1] S. Biasotti, A. Cerri, A. Bronstein, M. Bronstein, Recent trends, +applications, and perspectives in 3d shape similarity assessment, +in: Computer graphics forum, Vol. 35, Wiley Online Library, +2016, pp. 87–119. +[2] C. R. Qi, H. Su, K. Mo, L. J. Guibas, Pointnet: Deep learning on +point sets for 3d classification and segmentation, in: Proceed- +ings of the IEEE conference on computer vision and pattern +recognition, 2017, pp. 652–660. +[3] C. Wang, B. Samari, K. Siddiqi, Local spectral graph convolu- +tion for point set feature learning, in: Proceedings of the Euro- +pean conference on computer vision (ECCV), 2018, pp. 52–66. +[4] Y. Shen, C. Feng, Y. Yang, D. Tian, Mining point cloud local +structures by kernel correlation and graph pooling, in: Proceed- +ings of the IEEE conference on computer vision and pattern +recognition, 2018, pp. 4548–4557. +[5] Y. Wang, Y. Sun, Z. Liu, S. E. Sarma, M. M. Bronstein, J. M. +Solomon, Dynamic graph cnn for learning on point clouds, Acm +Transactions On Graphics (tog) 38 (5) (2019) 1–12. +[6] L. Wang, Y. Huang, Y. Hou, S. Zhang, J. Shan, Graph at- +tention convolution for point cloud semantic segmentation, in: +Proceedings of the IEEE/CVF conference on computer vision +and pattern recognition, 2019, pp. 10296–10305. +[7] J. Liu, B. Ni, C. Li, J. Yang, Q. Tian, Dynamic points agglom- +eration for hierarchical point sets learning, in: Proceedings of +the IEEE/CVF International Conference on Computer Vision, +2019, pp. 7546–7555. +[8] J. Li, B. M. Chen, G. H. Lee, So-net: Self-organizing network for +point cloud analysis, in: Proceedings of the IEEE conference on +computer vision and pattern recognition, 2018, pp. 9397–9406. +[9] A. Mnih, K. Gregor, Neural variational inference and learn- +ing in belief networks, in: International Conference on Machine +Learning, PMLR, 2014, pp. 1791–1799. +[10] Q. Huang, W. Wang, U. Neumann, Recurrent slice networks for +3d segmentation of point clouds, in: Proceedings of the IEEE +conference on computer vision and pattern recognition, 2018, +pp. 2626–2635. +[11] Z. Zhang, B.-S. Hua, S.-K. Yeung, Shellnet: +Efficient point +cloud convolutional neural networks using concentric shells +statistics, in: Proceedings of the IEEE/CVF international con- +ference on computer vision, 2019, pp. 1607–1616. +[12] H. Zhao, L. Jiang, C.-W. Fu, J. 
Jia, Pointweb: Enhancing lo- +cal neighborhood features for point cloud processing, in: Pro- +ceedings of the IEEE/CVF conference on computer vision and +pattern recognition, 2019, pp. 5565–5573. +[13] H. Su, V. Jampani, D. Sun, S. Maji, E. Kalogerakis, M.-H. +Yang, J. Kautz, Splatnet: +Sparse lattice networks for point +12 + +cloud processing, in: Proceedings of the IEEE conference on +computer vision and pattern recognition, 2018, pp. 2530–2539. +[14] B.-S. Hua, M.-K. Tran, S.-K. Yeung, Pointwise convolutional +neural networks, in: Proceedings of the IEEE conference on +computer vision and pattern recognition, 2018, pp. 984–993. +[15] W. Wu, Z. Qi, L. Fuxin, Pointconv: Deep convolutional net- +works on 3d point clouds, in: Proceedings of the IEEE/CVF +Conference on Computer Vision and Pattern Recognition, 2019, +pp. 9621–9630. +[16] S. Lan, R. Yu, G. Yu, L. S. Davis, Modeling local geometric +structure of 3d point clouds using geo-cnn, in: Proceedings of +the IEEE/cvf conference on computer vision and pattern recog- +nition, 2019, pp. 998–1008. +[17] A. Komarichev, Z. Zhong, J. Hua, A-cnn: Annularly convolu- +tional neural networks on point clouds, in: Proceedings of the +IEEE/CVF conference on computer vision and pattern recog- +nition, 2019, pp. 7421–7430. +[18] J. Mao, X. Wang, H. Li, Interpolated convolutional networks +for 3d point cloud understanding, in: +Proceedings of the +IEEE/CVF international conference on computer vision, 2019, +pp. 1578–1587. +[19] H. +Thomas, +C. +R. +Qi, +J.-E. +Deschaud, +B. +Marcotegui, +F. Goulette, L. J. Guibas, Kpconv: Flexible and deformable +convolution for point clouds, in: Proceedings of the IEEE/CVF +international conference on computer vision, 2019, pp. 6411– +6420. +[20] A. Paigwar, O. Erkent, C. Wolf, C. Laugier, Attentional point- +net for 3d-object detection in point clouds, in: Proceedings of +the IEEE/CVF Conference on Computer Vision and Pattern +Recognition Workshops, 2019, pp. 0–0. +[21] S. Xie, S. Liu, Z. Chen, Z. Tu, Attentional shapecontextnet for +point cloud recognition, in: Proceedings of the IEEE conference +on computer vision and pattern recognition, 2018, pp. 4606– +4615. +[22] W. Zhang, C. Xiao, Pcan: +3d attention map learning using +contextual information for point cloud based retrieval, in: Pro- +ceedings of the IEEE/CVF Conference on Computer Vision and +Pattern Recognition, 2019, pp. 12436–12445. +[23] J. Yang, Q. Zhang, B. Ni, L. Li, J. Liu, M. Zhou, Q. Tian, +Modeling point clouds with self-attention and gumbel subset +sampling, in: Proceedings of the IEEE/CVF conference on com- +puter vision and pattern recognition, 2019, pp. 3323–3332. +[24] Z. Wu, S. Song, A. Khosla, F. Yu, L. Zhang, X. Tang, J. Xiao, +3d shapenets: A deep representation for volumetric shapes, in: +Proceedings of the IEEE conference on computer vision and +pattern recognition, 2015, pp. 1912–1920. +[25] L. Yi, V. G. Kim, D. Ceylan, I.-C. Shen, M. Yan, H. Su, C. Lu, +Q. Huang, A. Sheffer, L. Guibas, A scalable active framework +for region annotation in 3d shape collections, ACM Transactions +on Graphics (ToG) 35 (6) (2016) 1–12. +[26] I. Armeni, O. Sener, A. R. Zamir, H. Jiang, I. Brilakis, M. Fis- +cher, S. Savarese, 3d semantic parsing of large-scale indoor +spaces, in: Proceedings of the IEEE conference on computer +vision and pattern recognition, 2016, pp. 1534–1543. +[27] C. R. Qi, H. Su, M. Nießner, A. Dai, M. Yan, L. J. 
Guibas, Vol- +umetric and multi-view cnns for object classification on 3d data, +in: Proceedings of the IEEE conference on computer vision and +pattern recognition, 2016, pp. 5648–5656. +[28] L. Tchapmi, C. Choy, I. Armeni, J. Gwak, S. Savarese, Seg- +cloud: Semantic segmentation of 3d point clouds, in: 2017 in- +ternational conference on 3D vision (3DV), IEEE, 2017, pp. +537–547. +[29] P.-S. Wang, Y. Liu, Y.-X. Guo, C.-Y. Sun, X. Tong, O-cnn: +Octree-based convolutional neural networks for 3d shape analy- +sis, ACM Transactions On Graphics (TOG) 36 (4) (2017) 1–11. +[30] H.-Y. Meng, L. Gao, Y.-K. Lai, D. Manocha, Vv-net: Voxel +vae net with group convolutions for point cloud segmentation, +in: Proceedings of the IEEE/CVF international conference on +computer vision, 2019, pp. 8500–8508. +[31] T. Shao, Y. Yang, Y. Weng, Q. Hou, K. Zhou, H-cnn: Spatial +hashing based cnn for 3d shape analysis, IEEE transactions on +visualization and computer graphics 26 (7) (2018) 2403–2416. +[32] F. J. Lawin, M. Danelljan, P. Tosteberg, G. Bhat, F. S. Khan, +M. Felsberg, Deep projective 3d semantic segmentation, in: In- +ternational Conference on Computer Analysis of Images and +Patterns, Springer, 2017, pp. 95–107. +[33] J. Guerry, A. Boulch, B. Le Saux, J. Moras, A. Plyer, D. Fil- +liat, Snapnet-r: Consistent 3d multi-view semantic labeling for +robotics, in: Proceedings of the IEEE international conference +on computer vision workshops, 2017, pp. 669–678. +[34] M. Jaritz, J. Gu, H. Su, Multi-view pointnet for 3d scene un- +derstanding, in: Proceedings of the IEEE/CVF International +Conference on Computer Vision Workshops, 2019, pp. 0–0. +[35] Z. Yang, L. Wang, Learning relationships for multi-view 3d ob- +ject recognition, in: +Proceedings of the IEEE/CVF Interna- +tional Conference on Computer Vision, 2019, pp. 7505–7514. +[36] C. R. Qi, L. Yi, H. Su, L. J. Guibas, Pointnet++: Deep hierar- +chical feature learning on point sets in a metric space, Advances +in neural information processing systems 30. +[37] M. +Atzmon, +H. +Maron, +Y. +Lipman, +Point +convolutional +neural +networks +by +extension +operators, +arXiv +preprint +arXiv:1803.10091. +[38] Y. Li, R. Bu, M. Sun, W. Wu, X. Di, B. Chen, Pointcnn: Convo- +lution on x-transformed points, Advances in neural information +processing systems 31. +[39] Z.-H. Lin, S.-Y. Huang, Y.-C. F. Wang, Convolution in the +13 + +cloud: +Learning deformable kernels in 3d graph convolution +networks for point cloud analysis, in: +Proceedings of the +IEEE/CVF conference on computer vision and pattern recog- +nition, 2020, pp. 1800–1809. +[40] G. Li, M. Muller, A. Thabet, B. Ghanem, Deepgcns: Can gcns +go as deep as cnns?, in: Proceedings of the IEEE/CVF inter- +national conference on computer vision, 2019, pp. 9267–9276. +[41] M.-H. Guo, J.-X. Cai, Z.-N. Liu, T.-J. Mu, R. R. Martin, S.-M. +Hu, Pct: Point cloud transformer, Computational Visual Media +7 (2) (2021) 187–199. +[42] H. Zhao, L. Jiang, J. Jia, P. H. Torr, V. Koltun, Point trans- +former, in: Proceedings of the IEEE/CVF International Con- +ference on Computer Vision, 2021, pp. 16259–16268. +[43] W. Han, H. Wu, C. Wen, C. Wang, X. Li, Blnet: Bidirectional +learning network for point clouds, Computational Visual Media +(2022) 1–12. +[44] X. Yan, C. Zheng, Z. Li, S. Wang, S. Cui, Pointasnl: Robust +point clouds processing using nonlocal neural networks with +adaptive sampling, in: Proceedings of the IEEE/CVF Confer- +ence on Computer Vision and Pattern Recognition, 2020, pp. +5589–5598. +[45] Y. Xu, T. Fan, M. 
Xu, L. Zeng, Y. Qiao, Spidercnn: Deep +learning on point sets with parameterized convolutional filters, +in: Proceedings of the European Conference on Computer Vi- +sion (ECCV), 2018, pp. 87–102. +[46] X. Liu, Z. Han, Y.-S. Liu, M. Zwicker, Point2sequence: Learn- +ing the shape representation of 3d point clouds with an +attention-based sequence to sequence network, in: Proceedings +of the AAAI Conference on Artificial Intelligence, Vol. 33, 2019, +pp. 8778–8785. +[47] Y. Lin, Z. Yan, H. Huang, D. Du, L. Liu, S. Cui, X. Han, Fp- +conv: Learning local flattening for point convolution, in: Pro- +ceedings of the IEEE/CVF Conference on Computer Vision and +Pattern Recognition, 2020, pp. 4293–4302. +[48] W. Han, C. Wen, C. Wang, X. Li, Q. Li, Point2node: Correla- +tion learning of dynamic-node for point cloud feature modeling, +in: Proceedings of the AAAI Conference on Artificial Intelli- +gence, Vol. 34, 2020, pp. 10925–10932. +[49] H. Zhou, Y. Feng, M. Fang, M. Wei, J. Qin, T. Lu, Adaptive +graph convolution for point cloud analysis, in: Proceedings of +the IEEE/CVF International Conference on Computer Vision, +2021, pp. 4965–4974. +[50] K. T. Wijaya, D.-H. Paek, S.-H. Kong, Advanced feature learn- +ing on point clouds using multi-resolution features and learnable +pooling, arXiv preprint arXiv:2205.09962. +[51] G. Te, W. Hu, A. Zheng, Z. Guo, Rgcnn: Regularized graph cnn +for point cloud segmentation, in: Proceedings of the 26th ACM +international conference on Multimedia, 2018, pp. 746–754. +[52] S. Cheng, X. Chen, X. He, Z. Liu, X. Bai, Pra-net: +Point +relation-aware network for 3d point cloud analysis, IEEE Trans- +actions on Image Processing PP (99). +[53] L. Landrieu, M. Simonovsky, Large-scale point cloud semantic +segmentation with superpoint graphs, in: Proceedings of the +IEEE conference on computer vision and pattern recognition, +2018, pp. 4558–4567. 
+14 + diff --git a/3NAyT4oBgHgl3EQfo_gV/content/2301.00515v1.pdf b/3NAyT4oBgHgl3EQfo_gV/content/2301.00515v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..87c6408c439dbfbdabc443655cabd08bfd9d6ee2 --- /dev/null +++ b/3NAyT4oBgHgl3EQfo_gV/content/2301.00515v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38f99e325325ec339db4458a18e9ad613c7b76c40ac403376024ab4f7e4464df +size 3587107 diff --git a/3NAyT4oBgHgl3EQfo_gV/vector_store/index.faiss b/3NAyT4oBgHgl3EQfo_gV/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..75194d5a3d408b3e03626dca3cbd168c8a30aa4f --- /dev/null +++ b/3NAyT4oBgHgl3EQfo_gV/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ea2041feb817da058c01223a475070b60a9c3eb3f8355742636c288ef1c3ea5 +size 5505069 diff --git a/3NFST4oBgHgl3EQfYjhQ/vector_store/index.faiss b/3NFST4oBgHgl3EQfYjhQ/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..dd8dde3b0afbb95ec7db2a170d374d2977541c92 --- /dev/null +++ b/3NFST4oBgHgl3EQfYjhQ/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37f989248bf0e7d7631889667f317461d99ee6d1f09c4d31ff451edb81b5236b +size 3473453 diff --git a/3tAyT4oBgHgl3EQfb_f6/content/2301.00276v1.pdf b/3tAyT4oBgHgl3EQfb_f6/content/2301.00276v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ecde325eea0815a5afd14f5ee029afa4a2c57abf --- /dev/null +++ b/3tAyT4oBgHgl3EQfb_f6/content/2301.00276v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66a45f1bf1b8c0ce86f4c2698182a0db7fd8ea428831d4d3721b205967f1b3d6 +size 397459 diff --git a/3tAyT4oBgHgl3EQfb_f6/vector_store/index.faiss b/3tAyT4oBgHgl3EQfb_f6/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..4cdedb992c8ffd9ffc7e13ba0f91ab0ae06dc225 --- /dev/null +++ b/3tAyT4oBgHgl3EQfb_f6/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad3e59720d8a4c56546248e3fa00303564b64620f6fb76d6a3c812bffc166bf6 +size 5046317 diff --git a/3tAzT4oBgHgl3EQfuf3H/content/tmp_files/2301.01693v1.pdf.txt b/3tAzT4oBgHgl3EQfuf3H/content/tmp_files/2301.01693v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..b270d23a63ea662cfc32758222bb5fd2ef66bc4a --- /dev/null +++ b/3tAzT4oBgHgl3EQfuf3H/content/tmp_files/2301.01693v1.pdf.txt @@ -0,0 +1,940 @@ +Mortality modeling at old-age: an mixture model approach +Silvio C. Patricio* +The Interdisciplinary Centre on Population Dynamics, University of Southern Denmark +Fredy Castellares +Departamento de Estat´ıstica, Universidade Federal de Minas Gerais +Bernardo Queiroz +Departamento de Demografia, Universidade Federal de Minas Gerais +January 5, 2023 +Abstract +In this paper, we propose a mixture-based model for mortality modeling above age 70. The proposed +model is compared with 4 other widely used models: the Beard, Gompertz, Makeham, and Perks models. +Our model captures well the mortality rate’s behavior at all the ages. We applied the method to a country +with high quality data, Japan, and one with lower data quality, Brazil. In the comparative study for the +Japanese population, the model presented a better fit to the data, obtaining an absolute mean percentage +error of less than 7%, while the other models presented values greater than 30%. 
+Keywords: mixture model, old-age, mortality modeling +1 +Introduction +In the past centuries, much has been done to model the process of mortality in populations and its con- +sequences (Graunt, 1662; Gompertz, 1825a; Wilmoth, 2000; van Raalte, 2021). One of humanity’s most +outstanding achievements in the last century, perhaps the last millennium, has been the four-decade increase +in human life expectancy over the past 160 years (Vaupel et al., 2021; Wilmoth, 2000) and the improvement +in human mortality. All these changes in human longevity directly affect pension, welfare, and health care +systems (Cutler et al., 2006). +*silca@sam.sdu.dk +1 +arXiv:2301.01693v1 [stat.AP] 4 Jan 2023 + +Despite pioneering work by Graunt and Gompertz, understanding of mortality for older ages remains +a challenge, specially in developing countries with more defective data. In general, mortality estimates at +older ages are limited by small numbers both in the exposure, death count and problems with age declaration +(Feehan, 2018; Wrigley-Field, 2014; Nepomuceno et al., 2019). There is an important and ongoing debate +about the levels of mortality at older ages. In general terms, the debate is whether mortality at older ages +is declining or continues to increase (Gavrilov & Gavrilova, 2019; Feehan, 2018). In some settings, such +as Brazil, there is also an important question on the crossover of mortality at older ages when comparing +different population sub-groups (Nepomuceno et al., 2019; Pinheiro & Queiroz, 2019; Gomes & Turra, +2009). +In addition to the problem of the quality of the data, there is a debate on hypotheses of selectivity and +of the biological limit of mortality in human populations that, in different ways, would impact the behavior +of mortality taxes in more advanced ages. One of the consequences of the mortality selectivity hypothesis +would be a greater rate of deceleration of the rates of mortality in more advanced ages. In this context, +there are a series of models to explain mortality behavior at older ages. The choice of the appropriate model +depends on the hypotheses assumed, whether in relation to the quality of the two data or in relation to the +impacts produced by the selectivity. +There are several possible explanations for the observed results and estimates. First one is related to +data quality in different areas of a country, across sub-population groups and age. For instance, it could be a +consequence of different age misreporting patterns or issues with quality of vital registration systems (Black +et al., 2017). Preston et al (2000) investigated how different types of age misreporting can affect estimates +of mortality rates at older ages, by analyzing the effects of three patterns of age misreporting: net age +overstatement, net age understatement, and symmetric age misreporting.. It is also possible that mortality +selection plays a role in the observed levels of mortality at older ages (Barbi et al., 2018; Wachter, 2018). +In the context of higher mortality rates at young ages, survivors to older ages would be physiologically +stronger and then live longer than others. +Unfortunately, data quality at older ages limits the understanding of mortality and the evolution of +survivorship at older ages. Feehan (2018) uses alternative methods to cohort mortality above age 80. He +finds that no model can be universally applied to estimate old-age mortality, but he argues that Log-Quad +(Wilmoth et al., 2012) provides a good fit. 
However, the log-quad method is based on standard mortality +changes from the Human Mortality Database that is constructed from a series of countries in the Northern +Hemisphere and might be limited to low and middle income countries. +In this paper, we suggest a model that captures decline in mortality rates at older ages, which is a +characteristic observed in some populations. Based on the proposed model, we perform a comparative study +using establish mortality laws with our proposed approach. The analysis was split into two parts. First, to +2 + +compare the four widely used models with the proposed model: in this part we will study the behavior of +these models in two databases: one with good quality data on mortality in Japan in 2015 (obtained from The +Human Mortality Database of mortality), and the other database that has limited data regarding mortality +in Brazil in 2010. In it the models will be evaluated from Mean Absolute Percentage Error (MAPE) of +the log-hazard using the leave-one-out cross-validation method, and the model with the least MAPE will +all be the best model. Moreover, as some models are complex, the genetic algorithm was used to obtain +the estimates via maximum likelihood. Using this algorithm ensures convergence to the global maximum +value. The second part applies the proposed model to different databases, and aims to understand the model +behavior and also to verify its potential for application to real data.The model presented a better fit to the +data, obtaining an absolute mean percentage error of less than 7%, while the other models presented values +greater than 30%. +2 +Models specification’s and parameter estimation +Considering a non negative random variable (r.v.) T defined in a probability space (R+, B, Pθ), representing +the individual life-spam, the r.v. T can be characterized by the survival function +S(x|θ) = Pθ(T > x) +which is associated with the density +f(x|θ) = − ∂ +∂xS(x|θ). +If S is a continuous survival function associated with a f density function, then the function µ defined in +R+ by +µ(x|θ) = lim +ε↓0 +Pθ(x < T < x + ε|X > x) +ε += f(x|θ) +S(x|θ) +it’s called the T mortality force. This function is usually used to describe the force of mortality for a group +of people or population. +The inferences in the model are based on the assumption that the number of death has a Poisson dis- +tribution. Therefore, be D = (D0, D1, . . . , Dm)′ a random sample with Poisson distribution, with Dk +representing the number of deaths between ages [k, k + 1), with k = 0, . . . , m, i.e. the number of death of +people with k years old. +For this approach it is considered that E(Dk) = µ(k|θ)Ek, with µ(k|θ) representing the mortality force +at age k, where θ = (θ1, θ2, . . . , θp)′ is the parameter vector that characterizes the mortality rate, and Ek the +population at age k exposed to risk, that are assumptions widely used by demographers (Brillinger et al., +3 + +1986). Also, as it is the Poisson distribution, we have to V(Dk) = µ(k|θ)Ek, same value of expectation. +Be D = (D0, . . . , Dm)′ e E = (E0, . . . , Em)′. The log-likelihood function from θ is given by +ℓ(θ|D) = +m +� +k=1 +Dk log λ(θ, k) − λ(θ, k), +(1) +with λ(θ, x) = µ(x|θ)E(x). The likelihood estimate �θ is obtained from maximizing the log-likelihood +function with in equation 1, with respect to θ. Obtaining the partial derivative vector of the equation 1, with +respect to θi, i = 1, . . . , p, we have +∂ℓ(θ|D) +∂θi += +m +� +k=1 +� +Dk +µ(k|θ) − Ek +� ∂µ(k|θ) +∂θi +. 
+(2) +The likelihood estimation can also be obtained by equating the partial derivative vector to zero and simul- +taneously solving the system of equations. The explicit form of the gradient vector is explained for each of +the models considered in this article. The Newton-Raphson method can be applied to solve the likelihood +equation to obtain the estimate �θ. +2.1 +Beard model +In this model introduced in Beard (1959), we have that the force of mortality is given by +µ(k|θ) = +aebk +1 + δebk +with θ = (a, b, δ)′ ∈ R3 ++. From which we calculate the partial derivative with respect to a and b. E Equation +2 gives us a general equation for the gradient vector, where it depends only on the mortality rate and its +partial derivative with respect to each parameter. Hence we get +∂ℓ(θ|D) +∂a += +m +� +k=1 +� +Dk +�1 + δebk +aebk +� +− Ek +� +ebk +(1 + δebk) +∂ℓ(θ|D) +∂b += +m +� +k=1 +� +Dk +�1 + δebk +aebk +� +− Ek +� +akebk +(1 + δebk)2 +∂ℓ(θ|D) +∂δ += +m +� +k=1 +� +Dk +�1 + δebk +aebk +� +− Ek +� +ae2bk +(1 + δebk)2 +4 + +representing the gradient vector. +2.2 +Gompertz model +In this model introduced in Gompertz (1825b), we have that the force of mortality is given by +µ(k|θ) = aebk, +with θ = (a, b)′ ∈ R2 ++. So for the gradient vector we have +∂ℓ(θ|D) +∂a += +m +� +k=1 +� Dk +aebk − Ek +� +ebk +∂ℓ(θ|D) +∂b += +m +� +k=1 +� Dk +aebk − Ek +� +akebk +2.3 +Makeham model +In this model introduced in Makeham (1860), we have that the force of mortality is given by +µ(k|θ) = aebk + c, +with θ = (a, b, c)′ ∈ R3 ++. So for the gradient vector we have +∂ℓ(θ|D) +∂a += +m +� +k=1 +� +Dk +aebk + c − Ek +� +ebk +∂ℓ(θ|D) +∂b += +m +� +k=1 +� +Dk +aebk + c − Ek +� +akebk +∂ℓ(θ|D) +∂c += +m +� +k=1 +� +Dk +aebk + c − Ek +� +5 + +2.4 +Perks model +In this model introduced in Perks (1932), we have that the force of mortality is given by +µ(k|θ) = γ + aebk +1 + δebk +with θ = (a, b, γ, δ)′. So for the gradient vector we have +∂ℓ(θ|D) +∂a += +m +� +k=1 +� +Dk +� 1 + δebk +γ + aebk +� +− Ek +� +ebk +1 + δebk +∂ℓ(θ|D) +∂b += +m +� +k=1 +� +Dk +� 1 + δebk +γ + aebk +� +− Ek +� k(a − δγ)ebk +(1 + δebk)2 +∂ℓ(θ|D) +∂γ += +m +� +k=1 +� +Dk +� 1 + δebk +γ + aebk +� +− Ek +� +1 +1 + δebk +∂ℓ(θ|D) +∂δ += +m +� +k=1 +� +Dk +� 1 + δebk +γ + aebk +� +− Ek +� ebk � +aebk + γ +� +(1 + δebk)2 +2.5 +Mixture model +As with Makeham, we will seek to decompose mortality into two components: premature and senescent +mortality, respectively modeled by an exponential and a Gompertz component. However, Makeham dis- +tinguishes these components through mortality force, and here we propose to distinguish them through +distribution. Therefore, we are considering that the r.v. T introduced at the beginning of this session is +associated with a probability density function f, which is define as: +f(x|θ) = p +� +λe−λx� ++ (1 − p) +� +ab exp +� +a +� +ebx − 1 +� ++ bx +�� +(3) +with θ = (a, b, λ, p)′. +The density f is a Gompertz and a exponential distribution a mixture. The Gompertz distribution will fit +the senescence deaths count, and the exponential distribution will fit the premature deaths, such as accidents +and disease. Briefly, this model considers the existence of two sub populations in the death count, one +6 + +Gompertz and the other Exponential, and the parameters p and q = 1 − p represent the proportions of each +one. +Since the random variable T is associated with a density function, we can also associate it with a hazard +function. 
In this case the force of mortality is defined by: +µ(x|θ) = f(x|θ) +S(x|θ) = p +� +λe−λx� ++ (1 − p) +� +ab exp +� +a +� +ebx − 1 +� ++ bx +�� +pe−λx + (1 − p) exp{−a (ebx − 1)} +, +(4) +for which there is no straightforward interpretation. Which is lost due to the ease of deriving functions such +as statistical moments and expected average residual life (for more details, see Finkelstein (2009)) .From +7 + +this we can get the gradient vector which, for this model, is given by +∂ℓ(θ|D) +∂a += +m +� +k=1 +� +Dk +pe−λk + (1 − p) exp{−a +� +ebk − 1 +� +} +p (λe−λk) + (1 − p) (ab exp {a (ebk − 1) + bk}) − Ek +� +× +× b(1 − p)ea(keb−1)+bλ + ab(1 − p)(keb − 1)ea(keb−1)+bx +pe−λk + (1 − p) exp{−a (ebk − 1)} ++ ++ (−1)(1 − p)(1 − ebk)e−a(ebk−1) � +p +� +λe−λk� ++ (1 − p) +� +ab exp +� +a +� +ebk − 1 +� ++ bk +��� +(pe−λk + (1 − p) exp{−a (ebk − 1)})2 +∂ℓ(θ|D) +∂b += +m +� +k=1 +� +Dk +pe−λk + (1 − p) exp{−a +� +ebk − 1 +� +} +p (λe−λk) + (1 − p) (ab exp {a (ebk − 1) + bk}) − Ek +� +× +× +a(1 − p)xebx−a(ebx−1) � +ab(1 − p)ea(xebx−1)+bx + λpe−λx� +� +(1 − p)ea(ebx−1) + pe−λx�2 ++ ++ a(1 − p)ea(ebx−1)+bx + ab(1 − p)ea(ebx−1)+bx � +axeb + x +� +(1 − p)ea(ebx−1) + pe−λx +∂ℓ(θ|D) +∂λ += +m +� +k=1 +� +Dk +pe−λk + (1 − p) exp{−a +� +ebk − 1 +� +} +p (λe−λk) + (1 − p) (ab exp {a (ebk − 1) + bk}) − Ek +� +× +× +� +�� +pe−λx − λpxe−λx +(1 − p)e−a(ebx−1) + pe−λx + +pxe−λx � +ab(1 − p)ea(xeb−1)+bx + λpe−λx� +� +(1 − p)e−a(ebx−1) + pe−λx +�2 +� +�� +∂ℓ(θ|D) +∂p += +m +� +k=1 +� +Dk +pe−λk + (1 − p) exp{−a +� +ebk − 1 +� +} +p (λe−λk) + (1 − p) (ab exp {a (ebk − 1) + bk}) − Ek +� +× +× +� +�� λe−λx − abea(xeb−1)+bx +(1 − p)e−a(ebx−1) + pe−λx − +� +e−λx − e−a(ebx−1)� � +ab(1 − p)ea(xeb−1)+bx + λpe−λx� +� +(1 − p)e−a(ebx−1) + pe−λx +�2 +� +�� +3 +Data and empirical results +In order to evaluate the proposed model, we will compare its performance on high and low-quality data. +For this, we will evaluate its performance against four other models, using the Mean Absolute Percentage +Error (MAPE) combined with the leave-one-out cross-validation method, which will measure the average +distance between the log-hazard and the log-mortality rate. Moreover, as some models are highly nonlinear, +8 + +the Genetic Algorithm (Scrucca, 2013; Mirjalili, 2019) will be used to maximize the likelihood function. +This algorithm ensures convergence to the global maximum value. +3.1 +Models comparison +In a high quality data setting +In this scenario, we will use mortality data from Japan in 2015 obtained from The Human Mortality +Database (HMD). The observed value of log µ is linearly increasing to a certain age, and then has a sharp +drop. This behavior was also noted this country in the last three decades. However this is not restricted +to Japan, other countries like Sweden, Germany, USA and Korea also had the same mortality behavior. +The Figure 1 shows the estimated log-hazard function. We can clearly see the models of Beard, Gompertz, +Makeham and Perks were not able to fit properly the mortality rate after age 100. +70 +80 +90 +100 +110 +−5 +−4 +−3 +−2 +−1 +0 +1 +Age +log(µ) +Beard +Gompertz +Makeham +Perks +Mixture model +Figure 1: Japan 2015 modeling +The Gompertz model consider force of mortality being log-linear, but clearly this behavior does not +describe the entire observed curve. For this model the estimated parameter is �θ = (0.0179, 0.1094)′. And +this model has a MAPE of 34.0127, i.e., this model’s predictions are on average 34.0127 % distant of the +observed value. 
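As an illustration of the fitting procedure used throughout this comparison, the sketch below maximizes the Poisson log-likelihood of Equation (1) for the Gompertz hazard and scores the fit by the MAPE of the log-hazard against the observed log death rates. SciPy's Nelder-Mead optimizer is used here as a stand-in for the genetic algorithm employed in the paper, the score is computed in sample rather than with the leave-one-out scheme, and all variable names are illustrative; substituting the corresponding hazard function yields the analogous fits for the other models discussed next.

import numpy as np
from scipy.optimize import minimize

def gompertz_mu(x, a, b):
    return a * np.exp(b * x)

def neg_loglik(theta, ages, deaths, exposure):
    a, b = np.exp(theta)                          # optimize on the log scale so that a, b > 0
    lam = gompertz_mu(ages, a, b) * exposure      # Poisson mean lambda(theta, k) of Equation (1)
    return -np.sum(deaths * np.log(lam) - lam)    # the additive log(D_k!) constant is dropped

def fit_and_mape(ages, deaths, exposure):
    # ages with nonzero death counts are assumed, so the observed log rate is finite
    res = minimize(neg_loglik, x0=np.log([0.02, 0.1]),
                   args=(ages, deaths, exposure), method="Nelder-Mead")
    a, b = np.exp(res.x)
    obs = np.log(deaths / exposure)               # observed log mortality rate
    fit = np.log(gompertz_mu(ages, a, b))         # fitted log hazard
    mape = 100.0 * np.mean(np.abs((obs - fit) / obs))
    return (a, b), mape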
A similar result can be obtained from the Makeham model, which has estimated parameter +�θ = (0.0174, 0.1103, 0.0008)′, and MAPE 33.0288. +The Beard can be seen as the ratio of a Gompertz and a Makeham models with c = 1, with the pa- +rameters estimated by ML �θ = (0.0165, 0.1216, 0.0073)′. Despite Beard’s combination of Makeham and +Gompertz models, this model provided the worse fit, reaching a MAPE of 55.6189. +The Perks model also has a similar construction to Beard. It is the ratio between two Makeham models. +For this model we estimate �θ = (0.0135, 0.1313, 0.0040, 0.0075)′. And as expected, this model had a very +9 + +similar behavior to the previous model, including in MAPE of 51.3591, suggesting that this model does not +fit well to the data. +Finally, for the proposed mixture-based model, we estimated �θ = (0.1155, 0.0163, 0.2061, 0.0126)′, and +a MAPE of 6.9193, the best of the models presented in this study. In addition, this model was the only one +that was able to capture the sharp drop in the mortality rate. With the estimated parameters we can interpret +that the non-senescence death represents 1.2599 % of the total death after age 70. +In a low quality data setting +We observed that the model works well on data that has good quality, and now we aim to understand how +the model behaves when the data has limitations. In this case we are going to use data from Brazil from +2010 (Queiroz et al., 2020; Gonzaga & Schmertmann, 2016) . Previous studies showa a mortality crossover +above age 60 when comparing more and less developed states in Brazil using the Topals model (Queiroz +et al., 2020; Gonzaga & Schmertmann, 2016). It is argued that the result is related to the level of complete- +ness of death counts, age misreporting and mortality selection. Thus, it is an important and relevant case +study for the application of our prosed mixture model. For this, as before, we will compare the performance +of the 5 models presented through MAPE. +70 +75 +80 +85 +90 +95 +100 +−3.0 +−2.5 +−2.0 +−1.5 +−1.0 +−0.5 +0.0 +Age +log(µ) +Beard +Gompertz +Makeham +Perks +Mixture model +Figure 2: Brazil 2010 modeling +For the first model (Beard) we estimated �θ = (0.0375, 0.0942, 5.5625 × 10−8)′, and a MAP of 20.4629, +i.e., on average this model distanced by 20 % of the mortality rate. We also got a similar conclusion about +the Gompertz model, estimating �θ = (0.0375, 0.0943)′ and MAPE about 20.4499. +The Makeham and Perks models also obtained similar results. For Makeham it was estimated �θ = +10 + +(0.01481, 0.1338, 0.03131)′ resulting in a MAP of 14.5473, and for Perks model it was estimated �θ = +(0.0163, 0.0129, 0.0290, 3.4272 × 10−7)′ which results in MAPE of 14.9002. +Finally, for the proposed model was estimated �θ = (0.1036, 0.0315, 0.2389, 0.0692)′, and a MAPE of +18.0038%, which indicates that the model is not able to capture mortality well in these data. Therefore, +the results found in this application match the results discussed in Feehan (2018) on the power of models +capturing mortality at advanced ages universally. +3.2 +Model applications +As we have seen, the proposed model has a high capacity to fit the mortality at older ages. Therefore, we +will illustrate the power of this model by applying it to mortality data from Japan (1993 and 2002), Sweden +(2011), Germany (2016), USA (1990 and 1992), Spain (2012) and Italy (2011). Table 1 represents the +estimate for each dataset, and Figure 4 represents their respective decomposed distribution of death. 
+Table 1: Parameters estimated. +Country +Year +ˆa +ˆb +ˆc +ˆp +MAPE +Japan +1993 +0.10911 +0.02916 +0.21615 +0.00250 +8.86459 +Japan +2002 +0.10897 +0.02425 +0.30152 +0.03276 +7.49451 +Sweden +2011 +0.12390 +0.01520 +0.26448 +0.01559 +12.27019 +Germany +2016 +0.11046 +0.02090 +0.22283 +0.00397 +10.68258 +USA +1990 +0.08845 +0.03569 +0.20360 +0.02569 +3.80694 +USA +1992 +0.09057 +0.03404 +0.20575 +0.03217 +2.91887 +Spain +2012 +0.12372 +0.01544 +0.22751 +0.01307 +12.38755 +Italy +2011 +0.11606 +0.01768 +0.21710 +0.01999 +13.24385 +In Table 1 it can be seen that the estimated values for p are small, less than 0.04, which indicates that the +proportion of premature deaths above age 70 does not exceed 4%. This result was already expected, since +by truncating the mortality data at age 70, we are excluding infant mortality and mortality hump (Remund +et al., 2018), and we only observe the tail of the distribution of premature mortality. Furthermore, the our +result is also in agreement with Horiuchi & Wilmoth’s results, that above age 75 mortality decelerates for +most causes of death (Horiuchi & Wilmoth, 1997). +The estimated values for the c parameter are similar, and concentrated around 0.23. This suggests that, +despite having different proportions, the distributions of premature death are similar, as can be seen on the +left in Figure 3. Such similarity was not observed in the senescent death distributions, which have a marked +difference, as can be seen on the right in Figure 3. Despite this, it is clear that the modal age of death is +between 80 and 90, which is consistent with previous studies and (Horiuchi et al., 2013). +11 + +70 +80 +90 +100 +110 +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +0.30 +Age +prematur mortality distribution +JPN − 1993 +JPN − 2002 +SWE − 2011 +DEUTNP − 2016 +USA − 1990 +USA − 1992 +ESP − 2012 +ITA − 2011 +70 +80 +90 +100 +110 +0.00 +0.01 +0.02 +0.03 +0.04 +0.05 +Age +senescence mortality distribution +JPN − 1993 +JPN − 2002 +SWE − 2011 +DEUTNP − 2016 +USA − 1990 +USA − 1992 +ESP − 2012 +ITA − 2011 +Figure 3: Estimates of mortality components. +70 +80 +90 +100 +110 +0 +10000 +20000 +30000 +JPN − 1993 +Age +dx +70 +80 +90 +100 +110 +0 +10000 +20000 +30000 +JPN − 2002 +Age +dx +70 +80 +90 +100 +110 +0 +1000 +2000 +3000 +4000 +SWE − 2011 +Age +dx +70 +80 +90 +100 +110 +0 +10000 +20000 +30000 +DEUTNP − 2016 +Age +dx +70 +80 +90 +100 +110 +0 +20000 +40000 +60000 +USA − 1990 +Age +dx +70 +80 +90 +100 +110 +0 +20000 +40000 +60000 +USA − 1992 +Age +dx +70 +80 +90 +100 +110 +0 +5000 +10000 +15000 +ESP − 2012 +Age +dx +70 +80 +90 +100 +110 +0 +5000 +15000 +25000 +ITA − 2011 +Age +dx +Senescence deaths +Premature deaths +Overall deaths +Figure 4: Estimations +The Figure 4 shows the distribution of death estimated and broken down into premature and senescent +deaths. In it, we can observe the quality of fit of the estimated model (black line). In addition, it is possible +12 + +to see that for Japan in 1993 and Germany in 2016, there were practically no premature deaths after age 70, +this could also be inferred from analyzing the Table 1, where the values of estimate for p are small. +4 +Conclusions and future works +Robust estimates of mortality rates in advanced ages are a challenge for demographers for various reasons. +Even in populations with good records of deaths and population there are disturbances in the function of +the low number of events and/or some limitation in the information on the age of death. 
In case of countries +where the problems of data quality is present, the challenges are greater. +For some centuries there has been an ambition to decompose mortality into interpretable components. +The best known are those proposed by Makeham (1860) and Heligman & Pollard (1980). However, in recent +years researchers have devoted to this problem (Remund et al., 2017; Mazzuco et al., 2021). Therefore, +this paper aims to bring a contribution to this discussion, delivering a new parametric model capable of +decomposing mortality through mixing models in a frequentest framework. Mazzuco et al. (2021) proposes +an approach similar to the one proposed in this paper, however the authors use a Bayesian framework. +As we have seen, the proposed model fits well the mortality curve, specially above age 100, and this +model does it without overparametrization, as Heligman & Pollard (1980). Furthermore, as it is a mixture +model, the model is flexible to become the Gompertz model (p = 0), or the Exponential model (p = 1). +When 0 < p < 1, the model fits a mortality curve with inflexion point (mortality deceleration) and plateau +(mortality plateau). +The use of Brazilian mortality data shed light on the performance of the model in a low quality database. +We could see that the mixture-based model captures the dynamics of mortality well only when there is a +drop in mortality rates, serving as an alternative to models that do not have this characteristic. +Although the present work presents a model capable of capturing the specific dynamics of the force of +mortality in certain populations, it also sheds light on other problems to be solved. Since the model is based +on mixtures of distributions, we are interested in deriving hypothesis tests on the estimated parameters. One +of the main ones is to test if p = 0, i.e. whether the model can be reduced to a Gompertz model; similar +interest to that studied in B¨ohnstedt & Gampe (2019), when a hypothesis test for Gamma heterogeneity is +derived, and important statistical properties are studied. +Finally, in the recently published paper Vaupel et al. (2022) point out that estimating senescence mor- +tality is of fundamental importance to understand the pace of human aging, human longevity and how far +we can live. In this sense, this work brought a method capable of identifying and estimating senescent mor- +tality, without having a great computational cost, often seen in Bayesian analysis (See Barber et al. (2015)), +or overparameterized models, as seen in Heligman & Pollard (1980). +13 + +Bibliography +Barber, S., Voss, J., & Webster, M. (2015). The rate of convergence for approximate bayesian computation. +Electronic Journal of Statistics, 9(1), 80–105. +Barbi, E., Lagona, F., Marsili, M., Vaupel, J. W., & Wachter, K. W. (2018). The plateau of human mortality: +Demography of longevity pioneers. Science, 360(6396), 1459–1461. +Beard, R. E. (1959). Note on some mathematical mortality models. In Ciba Foundation Symposium-The +Lifespan of Animals (Colloquia on Ageing), volume 5 (pp. 302–311).: Wiley Online Library. +Black, D. A., Hsu, Y.-C., Sanders, S. G., Schofield, L. S., & Taylor, L. J. (2017). The methuselah effect: The +pernicious impact of unreported deaths on old-age mortality estimates. Demography, 54(6), 2001–2024. +B¨ohnstedt, M. & Gampe, J. (2019). Detecting mortality deceleration: Likelihood inference and model +selection in the gamma-gompertz model. Statistics & Probability Letters, 150, 68–73. +Brillinger, D. R. et al. (1986). 
The natural variability of vital rates and associated statistics. Biometrics, +42(4), 693–734. +Cutler, D., Deaton, A., & Lleras-Muney, A. (2006). The determinants of mortality. Journal of economic +perspectives, 20(3), 97–120. +Feehan, D. M. (2018). Separating the signal from the noise: evidence for deceleration in old-age death +rates. Demography, 55(6), 2025–2044. +Finkelstein, M. (2009). Understanding the shape of the mixture failure rate (with engineering and demo- +graphic applications). Applied Stochastic Models in Business and Industry, 25(6), 643–663. +Gavrilov, L. A. & Gavrilova, N. S. (2019). Late-life mortality is underestimated because of data errors. +PLoS biology, 17(2), e3000148. +Gomes, M. M. F. & Turra, C. M. (2009). The number of centenarians in brazil: indirect estimates based on +death certificates. Demographic Research, 20, 495–502. +Gompertz, B. (1825a). Xxiv. on the nature of the function expressive of the law of human mortality, and +on a new mode of determining the value of life contingencies. in a letter to francis baily, esq. frs &c. +Philosophical transactions of the Royal Society of London, (115), 513–583. +14 + +Gompertz, B. (1825b). Xxiv. on the nature of the function expressive of the law of human mortality, and +on a new mode of determining the value of life contingencies. in a letter to francis baily, esq. frs &c. +Philosophical transactions of the Royal Society of London, 0(115), 513–583. +Gonzaga, M. R. & Schmertmann, C. P. (2016). Estimating age-and sex-specific mortality rates for small +areas with topals regression: an application to brazil in 2010. Revista Brasileira de Estudos de Populac¸˜ao, +33, 629–652. +Graunt, J. (1662). Natural and political observations mentioned in a following index, and made upon the +bills of mortality. In Mathematical Demography (pp. 11–20). Springer. +Heligman, L. & Pollard, J. H. (1980). The age pattern of mortality. Journal of the Institute of Actuaries, +107(1), 49–80. +Horiuchi, S., Ouellette, N., Cheung, S. L. K., & Robine, J.-M. (2013). Modal age at death: lifespan indicator +in the era of longevity extension. Vienna Yearbook of Population Research, (pp. 37–69). +Horiuchi, S. & Wilmoth, J. R. (1997). Age patterns of the life table aging rate for major causes of death +in japan, 1951–1990. The Journals of Gerontology Series A: Biological Sciences and Medical Sciences, +52(1), B67–B77. +Makeham, W. M. (1860). On the law of mortality and the construction of annuity tables. Journal of the +Institute of Actuaries, 8(6), 301–310. +Mazzuco, S. S., Suhrcke, M. M., & Zanotto, L. L. (2021). How to measure premature mortality? a proposal +combining “relative” and “absolute” approaches. Population health metrics, 19(1), 1–14. +Mirjalili, S. (2019). Genetic algorithm. In Evolutionary algorithms and neural networks (pp. 43–55). +Springer. +Nepomuceno, M., Turra, C., et al. (2019). The population of centenarians in Brazil: historical estimates +from 1900 to 2000. Technical report, Max Planck Institute for Demographic Research, Rostock, Ger- +many. +Perks, W. (1932). On some experiments in the graduation of mortality statistics. Journal of the Institute of +Actuaries, 63(1), 12–57. +Pinheiro, P. C. & Queiroz, B. L. (2019). Regional disparities in brazilian adult mortality: an analysis using +modal age at death (m) and compression of mortality (iqr). Anais, (pp. 1–20). +15 + +Queiroz, B. L., Gonzaga, M. R., Vasconcelos, A., Lopes, B. T., & Abreu, D. M. (2020). 
Comparative +analysis of completeness of death registration, adult mortality and life expectancy at birth in brazil at the +subnational level. Population health metrics, 18(1), 1–15. +Remund, A., Camarda, C. G., & Riffe, T. (2017). Analyzing the young adult mortality hump in r with +morthump. Rostock: Max Planck Institute for Demographic Research (MPIDR Technical Report TR- +2018-003). +Remund, A., Camarda, C. G., & Riffe, T. (2018). A cause-of-death decomposition of young adult excess +mortality. Demography, 55(3), 957–978. +Scrucca, L. (2013). GA: A package for genetic algorithms in R. Journal of Statistical Software, 53(4), +1–37. +van Raalte, A. A. (2021). What have we learned about mortality patterns over the past 25 years? Population +Studies, 75(sup1), 105–132. +Vaupel, J. W. et al. (2022). The Pull of the Plateau and the Sway of the Mode: Formal Relationships to +Estimate the Pace of Senescence. Technical report, Center for Open Science. +Vaupel, J. W., Villavicencio, F., & Bergeron-Boucher, M.-P. (2021). Demographic perspectives on the rise +of longevity. Proceedings of the National Academy of Sciences, 118(9). +Wachter, K. W. (2018). Hypothetical errors and plateaus: A response to newman. PLoS biology, 16(12), +e3000076. +Wilmoth, J., Zureick, S., Canudas-Romo, V., Inoue, M., & Sawyer, C. (2012). A flexible two-dimensional +mortality model for use in indirect estimation. Population studies, 66(1), 1–28. +Wilmoth, J. R. (2000). Demography of longevity: past, present, and future trends. Experimental gerontol- +ogy, 35(9-10), 1111–1129. +Wrigley-Field, E. (2014). Mortality deceleration and mortality selection: three unexpected implications of +a simple model. Demography, 51(1), 51–71. +16 + diff --git a/3tAzT4oBgHgl3EQfuf3H/content/tmp_files/load_file.txt b/3tAzT4oBgHgl3EQfuf3H/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..bba941732c54c4db598c93f3a2e676e3fdbbc235 --- /dev/null +++ b/3tAzT4oBgHgl3EQfuf3H/content/tmp_files/load_file.txt @@ -0,0 +1,780 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf,len=779 +page_content='Mortality modeling at old-age: an mixture model approach Silvio C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Patricio* The Interdisciplinary Centre on Population Dynamics, University of Southern Denmark Fredy Castellares Departamento de Estat´ıstica, Universidade Federal de Minas Gerais Bernardo Queiroz Departamento de Demografia, Universidade Federal de Minas Gerais January 5, 2023 Abstract In this paper, we propose a mixture-based model for mortality modeling above age 70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' The proposed model is compared with 4 other widely used models: the Beard, Gompertz, Makeham, and Perks models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Our model captures well the mortality rate’s behavior at all the ages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' We applied the method to a country with high quality data, Japan, and one with lower data quality, Brazil.' 
Silvio C. Patricio*, The Interdisciplinary Centre on Population Dynamics, University of Southern Denmark
Fredy Castellares, Departamento de Estatística, Universidade Federal de Minas Gerais
Bernardo Queiroz, Departamento de Demografia, Universidade Federal de Minas Gerais
January 5, 2023
arXiv:2301.01693v1 [stat.AP] 4 Jan 2023
*silca@sam.sdu.dk

Abstract

In this paper, we propose a mixture-based model for mortality modeling above age 70. The proposed model is compared with 4 other widely used models: the Beard, Gompertz, Makeham, and Perks models. Our model captures well the mortality rate's behavior at all ages. We applied the method to a country with high-quality data, Japan, and one with lower data quality, Brazil. In the comparative study for the Japanese population, the model presented a better fit to the data, obtaining an absolute mean percentage error of less than 7%, while the other models presented values greater than 30%.

Keywords: mixture model, old-age, mortality modeling

1 Introduction

In the past centuries, much has been done to model the process of mortality in populations and its consequences (Graunt, 1662; Gompertz, 1825a; Wilmoth, 2000; van Raalte, 2021). One of humanity's most outstanding achievements in the last century, perhaps the last millennium, has been the four-decade increase in human life expectancy over the past 160 years (Vaupel et al., 2021; Wilmoth, 2000) and the improvement in human mortality. All these changes in human longevity directly affect pension, welfare, and health care systems (Cutler et al., 2006).

Despite pioneering work by Graunt and Gompertz, understanding of mortality at older ages remains a challenge, especially in developing countries with more defective data.
In general, mortality estimates at older ages are limited by small numbers in both the exposures and the death counts, and by problems with age declaration (Feehan, 2018; Wrigley-Field, 2014; Nepomuceno et al., 2019). There is an important and ongoing debate about the levels of mortality at older ages. In general terms, the debate is whether mortality at older ages is declining or continues to increase (Gavrilov & Gavrilova, 2019; Feehan, 2018). In some settings, such as Brazil, there is also an important question on the crossover of mortality at older ages when comparing different population sub-groups (Nepomuceno et al., 2019; Pinheiro & Queiroz, 2019; Gomes & Turra, 2009).

In addition to the problem of data quality, there is a debate on the hypotheses of selectivity and of a biological limit to mortality in human populations, which, in different ways, would impact the behavior of mortality rates at more advanced ages. One of the consequences of the mortality selectivity hypothesis would be a faster deceleration of mortality rates at more advanced ages. In this context, there is a series of models to explain mortality behavior at older ages. The choice of the appropriate model depends on the hypotheses assumed, whether in relation to the quality of the data or in relation to the impacts produced by selectivity.
There are several possible explanations for the observed results and estimates. The first is related to data quality in different areas of a country, across sub-population groups and ages. For instance, it could be a consequence of different age misreporting patterns or of issues with the quality of vital registration systems (Black et al., 2017). Preston et al. (2000) investigated how different types of age misreporting can affect estimates of mortality rates at older ages, by analyzing the effects of three patterns of age misreporting: net age overstatement, net age understatement, and symmetric age misreporting. It is also possible that mortality selection plays a role in the observed levels of mortality at older ages (Barbi et al., 2018; Wachter, 2018). In a context of higher mortality rates at young ages, survivors to older ages would be physiologically stronger and would then live longer than others. Unfortunately, data quality at older ages limits the understanding of mortality and of the evolution of survivorship at older ages. Feehan (2018) uses alternative methods to estimate cohort mortality above age 80. He finds that no model can be universally applied to estimate old-age mortality, but he argues that the Log-Quad model (Wilmoth et al., 2012) provides a good fit. However, the Log-Quad method is based on standard mortality changes from the Human Mortality Database, which is constructed from a series of countries in the Northern Hemisphere, and may be of limited use for low- and middle-income countries.
In this paper, we suggest a model that captures the decline in mortality rates at older ages, a feature observed in some populations. Based on the proposed model, we perform a comparative study of established mortality laws and our proposed approach. The analysis is split into two parts. First, we compare the four widely used models with the proposed model: in this part we study the behavior of these models on two databases, one with good-quality data on mortality in Japan in 2015 (obtained from the Human Mortality Database), and the other with limited data on mortality in Brazil in 2010. The models are evaluated by the Mean Absolute Percentage Error (MAPE) of the log-hazard using the leave-one-out cross-validation method, and the model with the smallest MAPE is taken as the best model. Moreover, as some models are complex, the genetic algorithm was used to obtain the estimates via maximum likelihood. Using this algorithm ensures convergence to the global maximum value. The second part applies the proposed model to different databases, and aims to understand the model's behavior and to verify its potential for application to real data. The model presented a better fit to the data, obtaining an absolute mean percentage error of less than 7%, while the other models presented values greater than 30%.

2 Model specifications and parameter estimation
Consider a non-negative random variable (r.v.) T, defined on a probability space (R+, B, Pθ) and representing the individual life-span. The r.v. T can be characterized by the survival function S(x|θ) = Pθ(T > x), which is associated with the density f(x|θ) = −∂S(x|θ)/∂x. If S is a continuous survival function associated with a density function f, then the function μ defined on R+ by

μ(x|θ) = lim_{ε↓0} Pθ(x < T < x + ε | T > x)/ε = f(x|θ)/S(x|θ)

is called the force of mortality of T. This function is usually used to describe the force of mortality for a group of people or a population.

The inferences in the model are based on the assumption that the number of deaths has a Poisson distribution. Therefore, let D = (D0, D1, ..., Dm)′ be a random sample with a Poisson distribution, with Dk representing the number of deaths between ages [k, k+1), k = 0, ..., m, i.e., the number of deaths of people aged k. For this approach it is considered that E(Dk) = μ(k|θ)Ek, with μ(k|θ) representing the force of mortality at age k, where θ = (θ1, θ2, ..., θp)′ is the parameter vector that characterizes the mortality rate, and Ek is the population at age k exposed to risk; these are assumptions widely used by demographers (Brillinger et al., 1986).
Also, since the distribution is Poisson, we have V(Dk) = μ(k|θ)Ek, the same value as the expectation.

Let D = (D0, ..., Dm)′ and E = (E0, ..., Em)′. The log-likelihood function of θ is given by

ℓ(θ|D) = Σ_{k=1}^{m} [ Dk log λ(θ, k) − λ(θ, k) ],   (1)

with λ(θ, k) = μ(k|θ)Ek. The likelihood estimate θ̂ is obtained by maximizing the log-likelihood function in equation (1) with respect to θ. Taking the vector of partial derivatives of equation (1) with respect to θi, i = 1, ..., p, we have

∂ℓ(θ|D)/∂θi = Σ_{k=1}^{m} [ Dk/μ(k|θ) − Ek ] ∂μ(k|θ)/∂θi.   (2)

The likelihood estimate can also be obtained by equating the vector of partial derivatives to zero and solving the resulting system of equations simultaneously. The explicit form of the gradient vector is given below for each of the models considered in this article. The Newton-Raphson method can be applied to solve the likelihood equations and obtain the estimate θ̂.
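For concreteness, a minimal numerical sketch (in Python) of how the log-likelihood in (1) can be built and maximized; the Gompertz hazard, the exposures and the simulated death counts below are illustrative assumptions, not the data used in the paper.

import numpy as np
from scipy.optimize import minimize

# Illustrative inputs: exposures E_k and simulated Poisson death counts D_k for ages 70..110.
ages = np.arange(70, 111)
k = ages - 70.0
E = np.full(ages.shape, 10000.0)
rng = np.random.default_rng(1)
D = rng.poisson(0.02 * np.exp(0.10 * k) * E)

def neg_loglik(theta):
    # Gompertz hazard parametrized as log mu(k) = theta[0] + theta[1] * k, so mu stays positive
    lam = np.exp(theta[0] + theta[1] * k) * E   # lambda(theta, k) = mu(k|theta) * E_k
    return -np.sum(D * np.log(lam) - lam)       # minus equation (1); the log(D_k!) constant is omitted

fit = minimize(neg_loglik, x0=[np.log(0.01), 0.05], method="Nelder-Mead")
log_a_hat, b_hat = fit.x
print(np.exp(log_a_hat), b_hat)                 # maximum-likelihood estimates of (a, b)

Gradient-based alternatives, such as the Newton-Raphson iteration mentioned above, would instead use the analytic derivatives in (2).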
2.1 Beard model

In this model, introduced in Beard (1959), the force of mortality is given by

μ(k|θ) = a e^{bk} / (1 + δ e^{bk}),

with θ = (a, b, δ)′ ∈ R+³, from which we calculate the partial derivatives with respect to a, b and δ. Equation (2) gives a general expression for the gradient vector, which depends only on the mortality rate and its partial derivative with respect to each parameter. Hence we get

∂ℓ(θ|D)/∂a = Σ_{k=1}^{m} [ Dk (1 + δ e^{bk})/(a e^{bk}) − Ek ] · e^{bk}/(1 + δ e^{bk}),
∂ℓ(θ|D)/∂b = Σ_{k=1}^{m} [ Dk (1 + δ e^{bk})/(a e^{bk}) − Ek ] · a k e^{bk}/(1 + δ e^{bk})²,
∂ℓ(θ|D)/∂δ = −Σ_{k=1}^{m} [ Dk (1 + δ e^{bk})/(a e^{bk}) − Ek ] · a e^{2bk}/(1 + δ e^{bk})²,

representing the gradient vector.

2.2 Gompertz model

In this model, introduced in Gompertz (1825b), the force of mortality is given by μ(k|θ) = a e^{bk}, with θ = (a, b)′ ∈ R+². For the gradient vector we have

∂ℓ(θ|D)/∂a = Σ_{k=1}^{m} [ Dk/(a e^{bk}) − Ek ] e^{bk},
∂ℓ(θ|D)/∂b = Σ_{k=1}^{m} [ Dk/(a e^{bk}) − Ek ] a k e^{bk}.

2.3 Makeham model

In this model, introduced in Makeham (1860), the force of mortality is given by μ(k|θ) = a e^{bk} + c, with θ = (a, b, c)′ ∈ R+³. For the gradient vector we have

∂ℓ(θ|D)/∂a = Σ_{k=1}^{m} [ Dk/(a e^{bk} + c) − Ek ] e^{bk},
∂ℓ(θ|D)/∂b = Σ_{k=1}^{m} [ Dk/(a e^{bk} + c) − Ek ] a k e^{bk},
∂ℓ(θ|D)/∂c = Σ_{k=1}^{m} [ Dk/(a e^{bk} + c) − Ek ].

2.4 Perks model

In this model, introduced in Perks (1932), the force of mortality is given by

μ(k|θ) = (γ + a e^{bk}) / (1 + δ e^{bk}),

with θ = (a, b, γ, δ)′. For the gradient vector we have

∂ℓ(θ|D)/∂a = Σ_{k=1}^{m} [ Dk (1 + δ e^{bk})/(γ + a e^{bk}) − Ek ] · e^{bk}/(1 + δ e^{bk}),
∂ℓ(θ|D)/∂b = Σ_{k=1}^{m} [ Dk (1 + δ e^{bk})/(γ + a e^{bk}) − Ek ] · k (a − δγ) e^{bk}/(1 + δ e^{bk})²,
∂ℓ(θ|D)/∂γ = Σ_{k=1}^{m} [ Dk (1 + δ e^{bk})/(γ + a e^{bk}) − Ek ] · 1/(1 + δ e^{bk}),
∂ℓ(θ|D)/∂δ = −Σ_{k=1}^{m} [ Dk (1 + δ e^{bk})/(γ + a e^{bk}) − Ek ] · e^{bk}(γ + a e^{bk})/(1 + δ e^{bk})².
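As a quick reference, a small Python sketch (illustrative only; the parameter values and the re-indexing of ages are assumptions) collecting the four classical forces of mortality defined above:

import numpy as np

def beard(k, a, b, delta):
    # mu(k) = a e^{bk} / (1 + delta e^{bk})
    return a * np.exp(b * k) / (1.0 + delta * np.exp(b * k))

def gompertz(k, a, b):
    # mu(k) = a e^{bk}
    return a * np.exp(b * k)

def makeham(k, a, b, c):
    # mu(k) = a e^{bk} + c
    return a * np.exp(b * k) + c

def perks(k, a, b, gamma, delta):
    # mu(k) = (gamma + a e^{bk}) / (1 + delta e^{bk})
    return (gamma + a * np.exp(b * k)) / (1.0 + delta * np.exp(b * k))

k = np.arange(0, 41)                       # ages 70..110, re-indexed from 0 (an assumption)
print(np.log(gompertz(k, 0.02, 0.11)))     # the Gompertz log-hazard is linear in k

Note that, unlike Gompertz and Makeham, the Beard and Perks hazards level off towards a/δ as k grows large.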
2.5 Mixture model

As with Makeham, we seek to decompose mortality into two components, premature and senescent mortality, respectively modeled by an exponential and a Gompertz component. However, Makeham distinguishes these components through the force of mortality, while here we propose to distinguish them through the distribution. Therefore, we consider that the r.v. T introduced at the beginning of this section is associated with a probability density function f defined as

f(x|θ) = p [ λ e^{−λx} ] + (1 − p) [ a b exp{ bx − a(e^{bx} − 1) } ],   (3)

with θ = (a, b, λ, p)′. The density f is a mixture of a Gompertz and an exponential distribution. The Gompertz distribution fits the count of senescent deaths, and the exponential distribution fits premature deaths, such as those from accidents and disease. Briefly, this model considers the existence of two sub-populations in the death counts, one Gompertz and the other exponential, and the parameters p and q = 1 − p represent the proportions of each one.

Since the random variable T is associated with a density function, we can also associate it with a hazard function. In this case the force of mortality is defined by

μ(x|θ) = f(x|θ)/S(x|θ) = { p λ e^{−λx} + (1 − p) a b exp[ bx − a(e^{bx} − 1) ] } / { p e^{−λx} + (1 − p) exp[ −a(e^{bx} − 1) ] },   (4)

for which there is no straightforward interpretation; what is lost in interpretability is offset by the ease of deriving quantities such as statistical moments and the expected mean residual life (for more details, see Finkelstein (2009)).
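A small Python sketch of the mixture density, survival function and force of mortality in (3)–(4); the Gompertz component is written with survival exp{−a(e^{bx}−1)}, consistent with the denominator of (4), and the parameter values below are simply the Japan 2015 estimates reported later in the text, used here for illustration.

import numpy as np

def mixture_density(x, a, b, lam, p):
    # equation (3): p * Exponential(lam) + (1 - p) * Gompertz(a, b)
    expo = lam * np.exp(-lam * x)
    gomp = a * b * np.exp(b * x - a * (np.exp(b * x) - 1.0))
    return p * expo + (1.0 - p) * gomp

def mixture_survival(x, a, b, lam, p):
    return p * np.exp(-lam * x) + (1.0 - p) * np.exp(-a * (np.exp(b * x) - 1.0))

def mixture_hazard(x, a, b, lam, p):
    # equation (4): force of mortality mu = f / S
    return mixture_density(x, a, b, lam, p) / mixture_survival(x, a, b, lam, p)

x = np.linspace(0.0, 40.0, 201)                    # years lived after age 70 (an assumption)
mu = mixture_hazard(x, a=0.1155, b=0.0163, lam=0.2061, p=0.0126)
print(np.log(mu[:5]))                              # log-hazard at the first few ages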
From this we can get the gradient vector, which, for this model, follows the general form in (2): for each θi ∈ {a, b, λ, p},

∂ℓ(θ|D)/∂θi = Σ_{k=1}^{m} [ Dk · ( p e^{−λk} + (1 − p) e^{−a(e^{bk}−1)} ) / ( p λ e^{−λk} + (1 − p) a b e^{bk − a(e^{bk}−1)} ) − Ek ] · ∂μ(k|θ)/∂θi,

where each partial derivative ∂μ(k|θ)/∂θi is obtained by differentiating the mortality force in (4) with respect to the corresponding parameter a, b, λ or p.
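In practice the gradient need not be coded by hand; as a sketch (illustrative only, using SciPy's differential evolution as a stand-in global optimizer for the genetic algorithm used in the paper, with simulated death counts and exposures), the mixture can be fitted by maximizing (1) directly:

import numpy as np
from scipy.optimize import differential_evolution

def mixture_hazard(x, a, b, lam, p):
    # force of mortality from equation (4)
    f = p * lam * np.exp(-lam * x) + (1 - p) * a * b * np.exp(b * x - a * (np.exp(b * x) - 1.0))
    S = p * np.exp(-lam * x) + (1 - p) * np.exp(-a * (np.exp(b * x) - 1.0))
    return f / S

ages = np.arange(0, 41)                           # years after age 70 (an assumption)
E = np.full(ages.shape, 5000.0)                   # illustrative exposures
rng = np.random.default_rng(7)
D = rng.poisson(mixture_hazard(ages, 0.1155, 0.0163, 0.2061, 0.0126) * E)

def neg_loglik(theta):
    a, b, lam, p = theta
    lam_k = mixture_hazard(ages, a, b, lam, p) * E   # lambda(theta, k) in equation (1)
    return -np.sum(D * np.log(lam_k) - lam_k)

bounds = [(1e-3, 0.5), (1e-3, 0.3), (1e-3, 1.0), (1e-6, 0.5)]   # illustrative search box
fit = differential_evolution(neg_loglik, bounds, seed=0, tol=1e-8)
print(fit.x)                                      # estimates of (a, b, lambda, p)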
3 Data and empirical results

In order to evaluate the proposed model, we compare its performance on high- and low-quality data. For this, we evaluate its performance against four other models, using the Mean Absolute Percentage Error (MAPE) combined with the leave-one-out cross-validation method, which measures the average distance between the log-hazard and the log-mortality rate. Moreover, as some models are highly nonlinear, the genetic algorithm (Scrucca, 2013; Mirjalili, 2019) is used to maximize the likelihood function. This algorithm ensures convergence to the global maximum value.
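A sketch of this evaluation loop (in Python, with a Gompertz fit and simulated data standing in for the actual models and datasets): each age is left out in turn, the model is refit by maximum likelihood, and the absolute percentage error of the predicted log-hazard against the observed log-mortality rate is accumulated.

import numpy as np
from scipy.optimize import minimize

ages = np.arange(70, 111)
k = ages - 70.0
E = np.full(ages.shape, 5000.0)                   # illustrative exposures
rng = np.random.default_rng(3)
D = rng.poisson(0.01 * np.exp(0.09 * k) * E)      # simulated death counts

def fit_gompertz(idx):
    # maximum likelihood on the ages selected by idx, via equation (1)
    def nll(theta):
        lam = np.exp(theta[0] + theta[1] * k[idx]) * E[idx]   # log mu = log(a) + b k
        return -np.sum(D[idx] * np.log(lam) - lam)
    return minimize(nll, x0=[np.log(0.01), 0.05], method="Nelder-Mead").x

obs_log_rate = np.log(D / E)                      # observed log mortality rate
errors = []
for i in range(len(ages)):
    idx = np.arange(len(ages)) != i               # leave age i out
    log_a_hat, b_hat = fit_gompertz(idx)
    pred = log_a_hat + b_hat * k[i]               # predicted log-hazard at the held-out age
    errors.append(abs((obs_log_rate[i] - pred) / obs_log_rate[i]))
print(100.0 * np.mean(errors))                    # MAPE of the log-hazard, in percent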
3.1 Models comparison

In a high-quality data setting

In this scenario, we use mortality data from Japan in 2015, obtained from the Human Mortality Database (HMD). The observed value of log μ increases linearly up to a certain age and then has a sharp drop. This behavior has also been observed in this country over the last three decades. However, this is not restricted to Japan; other countries such as Sweden, Germany, the USA and Korea show the same mortality behavior. Figure 1 shows the estimated log-hazard functions. We can clearly see that the Beard, Gompertz, Makeham and Perks models were not able to properly fit the mortality rate after age 100.

Figure 1: Japan 2015 modeling. [Plot of log(μ) against age (70–110) for the Beard, Gompertz, Makeham, Perks and mixture models.]

The Gompertz model considers the force of mortality to be log-linear, but clearly this behavior does not describe the entire observed curve. For this model the estimated parameter is θ̂ = (0.0179, 0.1094)′, and the model has a MAPE of 34.0127, i.e., this model's predictions are on average 34.0127% away from the observed value. A similar result is obtained with the Makeham model, which has estimated parameter θ̂ = (0.0174, 0.1103, 0.0008)′ and a MAPE of 33.0288.

The Beard model can be seen as the ratio of a Gompertz and a Makeham model with c = 1; its parameters estimated by maximum likelihood are θ̂ = (0.0165, 0.1216, 0.0073)′. Despite Beard's combination of the Makeham and Gompertz models, this model provided the worst fit, reaching a MAPE of 55.6189. The Perks model has a similar construction to Beard: it is the ratio between two Makeham models.
For this model we estimate θ̂ = (0.0135, 0.1313, 0.0040, 0.0075)′. As expected, this model behaves very similarly to the previous model, with a MAPE of 51.3591, suggesting that it does not fit the data well.

Finally, for the proposed mixture-based model, we estimated θ̂ = (0.1155, 0.0163, 0.2061, 0.0126)′ and a MAPE of 6.9193, the best of the models presented in this study. In addition, this model was the only one able to capture the sharp drop in the mortality rate. With the estimated parameters we can conclude that non-senescent deaths represent 1.2599% of the total deaths after age 70.

In a low-quality data setting

We observed that the model works well on good-quality data, and now we aim to understand how the model behaves when the data have limitations. In this case we use data from Brazil from 2010 (Queiroz et al., 2020; Gonzaga & Schmertmann, 2016).
Previous studies show a mortality crossover above age 60 when comparing more and less developed states in Brazil using the TOPALS model (Queiroz et al., 2020; Gonzaga & Schmertmann, 2016). It is argued that this result is related to the level of completeness of death counts, age misreporting and mortality selection. Thus, it is an important and relevant case study for the application of our proposed mixture model. For this, as before, we compare the performance of the 5 models through the MAPE.

Figure 2: Brazil 2010 modeling. [Plot of log(μ) against age (70–100) for the Beard, Gompertz, Makeham, Perks and mixture models.]

For the first model (Beard) we estimated θ̂ = (0.0375, 0.0942, 5.5625 × 10^−8)′ and a MAPE of 20.4629, i.e., on average this model's predictions are about 20% away from the observed mortality rate.
We reached a similar conclusion for the Gompertz model, estimating $\hat{\theta} = (0.0375, 0.0943)'$ with a MAPE of about 20.4499. The Makeham and Perks models obtained similar results: for Makeham we estimated $\hat{\theta} = (0.01481, 0.1338, 0.03131)'$, resulting in a MAPE of 14.5473, and for the Perks model $\hat{\theta} = (0.0163, 0.0129, 0.0290, 3.4272 \times 10^{-7})'$, which results in a MAPE of 14.9002. Finally, for the proposed model we estimated $\hat{\theta} = (0.1036, 0.0315, 0.2389, 0.0692)'$ and a MAPE of 18.0038%, which indicates that the model is not able to capture mortality well in these data.
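The model comparison throughout this section is based on MAPE. The exact formula is defined earlier in the paper; as a reading aid only, the values quoted here are consistent with the standard mean absolute percentage error over the fitted ages,

\[
\mathrm{MAPE} = \frac{100}{n}\sum_{x}\frac{\lvert m_x - \hat{m}_x \rvert}{m_x},
\]

where $m_x$ and $\hat{m}_x$ are the observed and fitted mortality rates at age $x$ and $n$ is the number of ages used. Under this reading, the Beard fit above deviates from the observed rates by roughly 20% on average, against about 7% for the mixture model on the good-quality data.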
Therefore, the results found in this application are in line with the discussion in Feehan (2018) on how well models can capture mortality at advanced ages universally.

3.2 Model applications

As we have seen, the proposed model has a high capacity to fit mortality at older ages. We therefore illustrate its power by applying it to mortality data from Japan (1993 and 2002), Sweden (2011), Germany (2016), USA (1990 and 1992), Spain (2012) and Italy (2011). Table 1 reports the estimates for each dataset, and Figure 4 shows their respective decomposed distributions of deaths.

Table 1: Parameters estimated.

Country  Year  $\hat{a}$  $\hat{b}$  $\hat{c}$  $\hat{p}$  MAPE
Japan    1993  0.10911    0.02916    0.21615    0.00250    8.86459
Japan    2002  0.10897    0.02425    0.30152    0.03276    7.49451
Sweden   2011  0.12390    0.01520    0.26448    0.01559    12.27019
Germany  2016  0.11046    0.02090    0.22283    0.00397    10.68258
USA      1990  0.08845    0.03569    0.20360    0.02569    3.80694
USA      1992  0.09057    0.03404    0.20575    0.03217    2.91887
Spain    2012  0.12372    0.01544    0.22751    0.01307    12.38755
Italy    2011  0.11606    0.01768    0.21710    0.01999    13.24385

In Table 1 it can be seen that the estimated values of p are small (less than 0.04), indicating that the proportion of premature deaths above age 70 does not exceed 4%. This result was expected: by truncating the mortality data at age 70 we exclude infant mortality and the mortality hump (Remund et al., 2018), and we only observe the tail of the distribution of premature mortality. Furthermore, our result agrees with Horiuchi & Wilmoth's finding that above age 75 mortality decelerates for most causes of death (Horiuchi & Wilmoth, 1997). The estimated values of the c parameter are similar and concentrated around 0.23. This suggests that, despite having different proportions, the distributions of premature death are similar, as can be seen on the left in Figure 3. Such similarity was not observed in the senescent death distributions, which show a marked difference, as can be seen on the right in Figure 3. Despite this, it is clear that the modal age at death is between 80 and 90, which is consistent with previous studies (Horiuchi et al., 2013).
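To make the roles of the parameters in Table 1 concrete, the decomposition behind Figures 3 and 4 can be written schematically as a two-component mixture of the density of deaths above age 70 (the exact component densities are those defined earlier in the paper; the form below only reflects how $a$, $b$, $c$ and $p$ are interpreted in the text):

\[
f(x) = p\, f_{\mathrm{prem}}(x; c) + (1-p)\, f_{\mathrm{sen}}(x; a, b), \qquad x \ge 70,
\]

where $f_{\mathrm{prem}}$ is the premature-death component governed by $c$ and $f_{\mathrm{sen}}$ is the senescent, Gompertz-type component governed by $a$ and $b$. With $\hat{p} \le 0.04$ in every country-year, the premature component never contributes more than 4% of deaths above age 70, and the similar values of $\hat{c}$ explain why the premature curves on the left of Figure 3 nearly coincide.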
Figure 3: Estimates of mortality components (left panel: premature mortality distribution by age, 70-110; right panel: senescence mortality distribution by age, 70-110; curves for JPN 1993, JPN 2002, SWE 2011, DEUTNP 2016, USA 1990, USA 1992, ESP 2012 and ITA 2011).
Figure 4: Estimations (one panel per country-year: JPN 1993, JPN 2002, SWE 2011, DEUTNP 2016, USA 1990, USA 1992, ESP 2012, ITA 2011; deaths $d_x$ by age, 70-110, decomposed into senescence deaths, premature deaths and overall deaths).

Figure 4 shows the estimated distribution of deaths broken down into premature and senescent deaths. In it we can observe the quality of fit of the estimated model (black line). In addition, it is possible to see that for Japan in 1993 and Germany in 2016 there were practically no premature deaths after age 70; this could also be inferred from Table 1, where the estimated values of p are small.

4 Conclusions and future works

Robust estimates of mortality rates at advanced ages are a challenge for demographers for various reasons. Even in populations with good records of deaths and population counts there are disturbances due to the low number of events and/or limitations in the information on age at death.
In countries where data-quality problems are present, the challenges are greater. For some centuries there has been an ambition to decompose mortality into interpretable components. The best-known proposals are those of Makeham (1860) and Heligman & Pollard (1980); in recent years, however, researchers have devoted renewed attention to this problem (Remund et al., 2017; Mazzuco et al., 2021). This paper therefore aims to contribute to this discussion by delivering a new parametric model capable of decomposing mortality through mixture models in a frequentist framework. Mazzuco et al. (2021) propose an approach similar to the one proposed in this paper, but the authors use a Bayesian framework. As we have seen, the proposed model fits the mortality curve well, especially above age 100, and it does so without the overparametrization of Heligman & Pollard (1980). Furthermore, as it is a mixture model, it is flexible enough to reduce to the Gompertz model (p = 0) or to the Exponential model (p = 1). When 0 < p < 1, the model fits a mortality curve with an inflection point (mortality deceleration) and a plateau (mortality plateau). The use of Brazilian mortality data shed light on the performance of the model in a low-quality database: the mixture-based model captures the dynamics of mortality well only when there is a drop in mortality rates, serving as an alternative to models that do not have this characteristic.
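This flexibility can be made explicit. Keeping the schematic notation above (the exact component forms are those defined earlier in the paper), the force of mortality implied by the mixture is the survival-weighted average of the component hazards,

\[
\mu(x) = \frac{p\, f_{\mathrm{prem}}(x) + (1-p)\, f_{\mathrm{sen}}(x)}{p\, S_{\mathrm{prem}}(x) + (1-p)\, S_{\mathrm{sen}}(x)},
\]

so $p = 0$ recovers the Gompertz hazard, $p = 1$ recovers the constant hazard of the Exponential model, and for $0 < p < 1$ the surviving population shifts towards the lower-hazard component at the oldest ages, which is what produces the deceleration and plateau just described.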
Although the present work presents a model capable of capturing the specific dynamics of the force of mortality in certain populations, it also sheds light on other problems to be solved. Since the model is based on mixtures of distributions, we are interested in deriving hypothesis tests for the estimated parameters. One of the main ones is to test whether p = 0, i.e. whether the model can be reduced to a Gompertz model; this interest is similar to that of Böhnstedt & Gampe (2019), where a hypothesis test for Gamma heterogeneity is derived and important statistical properties are studied. Finally, in a recently published paper, Vaupel et al. (2022) point out that estimating senescent mortality is of fundamental importance for understanding the pace of human aging, human longevity and how long we can live. In this sense, this work provides a method capable of identifying and estimating senescent mortality without the high computational cost often seen in Bayesian analysis (see Barber et al., 2015) or the overparameterization seen in Heligman & Pollard (1980).
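Regarding the test of $p = 0$ mentioned above, one natural candidate, sketched here only as an illustration and not taken from the paper, is a likelihood-ratio comparison of the mixture fit against the nested Gompertz fit,

\[
\Lambda = 2\left[\ell\big(\hat{\theta}_{\mathrm{mixture}}\big) - \ell\big(\hat{\theta}_{\mathrm{Gompertz}}\big)\right],
\]

with the caveat that $p = 0$ lies on the boundary of the parameter space, so the usual $\chi^2_1$ reference distribution need not apply; handling exactly this kind of boundary problem is what Böhnstedt & Gampe (2019) do for Gamma heterogeneity in the gamma-Gompertz model.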
Bibliography

Barber, S., Voss, J., & Webster, M. (2015). The rate of convergence for approximate Bayesian computation. Electronic Journal of Statistics, 9(1), 80–105.
Barbi, E., Lagona, F., Marsili, M., Vaupel, J. W., & Wachter, K. W. (2018). The plateau of human mortality: Demography of longevity pioneers. Science, 360(6396), 1459–1461.
Beard, R. E. (1959). Note on some mathematical mortality models. In Ciba Foundation Symposium - The Lifespan of Animals (Colloquia on Ageing), volume 5 (pp. 302–311). Wiley Online Library.
Black, D. A., Hsu, Y.-C., Sanders, S. G., Schofield, L. S., & Taylor, L. J. (2017). The Methuselah effect: The pernicious impact of unreported deaths on old-age mortality estimates. Demography, 54(6), 2001–2024.
Böhnstedt, M. & Gampe, J. (2019). Detecting mortality deceleration: Likelihood inference and model selection in the gamma-Gompertz model. Statistics & Probability Letters, 150, 68–73.
Brillinger, D. R. et al. (1986). The natural variability of vital rates and associated statistics. Biometrics, 42(4), 693–734.
Cutler, D., Deaton, A., & Lleras-Muney, A. (2006). The determinants of mortality. Journal of Economic Perspectives, 20(3), 97–120.
Feehan, D. M. (2018). Separating the signal from the noise: Evidence for deceleration in old-age death rates. Demography, 55(6), 2025–2044.
Finkelstein, M. (2009). Understanding the shape of the mixture failure rate (with engineering and demographic applications). Applied Stochastic Models in Business and Industry, 25(6), 643–663.
Gavrilov, L. A. & Gavrilova, N. S. (2019). Late-life mortality is underestimated because of data errors. PLoS Biology, 17(2), e3000148.
Gomes, M. M. F. & Turra, C. M. (2009). The number of centenarians in Brazil: Indirect estimates based on death certificates. Demographic Research, 20, 495–502.
Gompertz, B. (1825a). XXIV. On the nature of the function expressive of the law of human mortality, and on a new mode of determining the value of life contingencies. In a letter to Francis Baily, Esq. FRS &c. Philosophical Transactions of the Royal Society of London, (115), 513–583.
Gompertz, B. (1825b). XXIV. On the nature of the function expressive of the law of human mortality, and on a new mode of determining the value of life contingencies. In a letter to Francis Baily, Esq. FRS &c. Philosophical Transactions of the Royal Society of London, (115), 513–583.
Gonzaga, M. R. & Schmertmann, C. P. (2016). Estimating age- and sex-specific mortality rates for small areas with Topals regression: An application to Brazil in 2010. Revista Brasileira de Estudos de População, 33, 629–652.
Graunt, J. (1662). Natural and political observations mentioned in a following index, and made upon the bills of mortality. In Mathematical Demography (pp. 11–20). Springer.
Heligman, L. & Pollard, J. H. (1980). The age pattern of mortality. Journal of the Institute of Actuaries, 107(1), 49–80.
Horiuchi, S., Ouellette, N., Cheung, S. L. K., & Robine, J.-M. (2013). Modal age at death: Lifespan indicator in the era of longevity extension. Vienna Yearbook of Population Research, (pp. 37–69).
Horiuchi, S. & Wilmoth, J. R. (1997). Age patterns of the life table aging rate for major causes of death in Japan, 1951–1990. The Journals of Gerontology Series A: Biological Sciences and Medical Sciences, 52(1), B67–B77.
Makeham, W. M. (1860). On the law of mortality and the construction of annuity tables. Journal of the Institute of Actuaries, 8(6), 301–310.
Mazzuco, S. S., Suhrcke, M. M., & Zanotto, L. L. (2021). How to measure premature mortality? A proposal combining "relative" and "absolute" approaches. Population Health Metrics, 19(1), 1–14.
Mirjalili, S. (2019). Genetic algorithm. In Evolutionary Algorithms and Neural Networks (pp. 43–55). Springer.
Nepomuceno, M., Turra, C., et al. (2019). The population of centenarians in Brazil: Historical estimates from 1900 to 2000. Technical report, Max Planck Institute for Demographic Research, Rostock, Germany.
Perks, W. (1932). On some experiments in the graduation of mortality statistics. Journal of the Institute of Actuaries, 63(1), 12–57.
Pinheiro, P. C. & Queiroz, B. L. (2019). Regional disparities in Brazilian adult mortality: An analysis using modal age at death (M) and compression of mortality (IQR). Anais, (pp. 1–20).
Queiroz, B. L., Gonzaga, M. R., Vasconcelos, A., Lopes, B. T., & Abreu, D. M. (2020). Comparative analysis of completeness of death registration, adult mortality and life expectancy at birth in Brazil at the subnational level. Population Health Metrics, 18(1), 1–15.
Remund, A., Camarda, C. G., & Riffe, T.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Analyzing the young adult mortality hump in r with morthump.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Rostock: Max Planck Institute for Demographic Research (MPIDR Technical Report TR- 2018-003).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Remund, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', Camarda, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', & Riffe, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' A cause-of-death decomposition of young adult excess mortality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Demography, 55(3), 957–978.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Scrucca, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' GA: A package for genetic algorithms in R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Journal of Statistical Software, 53(4), 1–37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' van Raalte, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' What have we learned about mortality patterns over the past 25 years?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Population Studies, 75(sup1), 105–132.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Vaupel, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' The Pull of the Plateau and the Sway of the Mode: Formal Relationships to Estimate the Pace of Senescence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Technical report, Center for Open Science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Vaupel, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', Villavicencio, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', & Bergeron-Boucher, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content='-P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Demographic perspectives on the rise of longevity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Proceedings of the National Academy of Sciences, 118(9).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Wachter, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Hypothetical errors and plateaus: A response to newman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' PLoS biology, 16(12), e3000076.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Wilmoth, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', Zureick, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', Canudas-Romo, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', Inoue, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=', & Sawyer, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2012).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' A flexible two-dimensional mortality model for use in indirect estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Population studies, 66(1), 1–28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Wilmoth, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2000).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Demography of longevity: past, present, and future trends.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Experimental gerontol- ogy, 35(9-10), 1111–1129.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Wrigley-Field, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' (2014).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Mortality deceleration and mortality selection: three unexpected implications of a simple model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' Demography, 51(1), 51–71.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} +page_content=' 16' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3tAzT4oBgHgl3EQfuf3H/content/2301.01693v1.pdf'} diff --git a/5NFIT4oBgHgl3EQf7itm/content/2301.11398v1.pdf b/5NFIT4oBgHgl3EQf7itm/content/2301.11398v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d1906be23ed88f6451c8bce9c547844a66a16d31 --- /dev/null +++ b/5NFIT4oBgHgl3EQf7itm/content/2301.11398v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0af6ac12e85736f6ec662252891b06a672016d4ccd614c5506042172bf99333 +size 167907 diff --git a/5NFIT4oBgHgl3EQf7itm/vector_store/index.faiss b/5NFIT4oBgHgl3EQf7itm/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..7c7f5f5201f431e65a40a0eecc024069670f2ef5 --- /dev/null +++ b/5NFIT4oBgHgl3EQf7itm/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58c273ab7d0f1f50c47cf0bded92781c6b77e1552f222e318279db3eeab0605f +size 2162733 diff --git a/5NFIT4oBgHgl3EQf7itm/vector_store/index.pkl b/5NFIT4oBgHgl3EQf7itm/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..ba1ddd8d43dff6c42032a5335fc63a241f87c7c3 --- /dev/null +++ b/5NFIT4oBgHgl3EQf7itm/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e61d1b452ae3b3789cbfe10d69aefd71423cc69edc5d32d107dc6f8671a99325 +size 76066 diff --git a/5NFKT4oBgHgl3EQf-C45/content/tmp_files/2301.11956v1.pdf.txt b/5NFKT4oBgHgl3EQf-C45/content/tmp_files/2301.11956v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..1dcdba6412d70d2a957080f73d242a9d49668d6d --- /dev/null +++ b/5NFKT4oBgHgl3EQf-C45/content/tmp_files/2301.11956v1.pdf.txt @@ -0,0 +1,2309 @@ +On the Connection Between MPNN and Graph Transformer +Chen Cai 1 Truong Son Hy 1 Rose Yu 1 Yusu Wang 1 +Abstract +Graph Transformer (GT) recently has emerged +as a new paradigm of graph learning algorithms, +outperforming the previously popular Message +Passing Neural Network (MPNN) on multiple +benchmarks. Previous work (Kim et al., 2022) +shows that with proper position embedding, GT +can approximate MPNN arbitrarily well, implying +that GT is at least as powerful as MPNN. In this +paper, we study the inverse connection and show +that MPNN with virtual node (VN), a commonly +used heuristic with little theoretical understand- +ing, is powerful enough to arbitrarily approximate +the self-attention layer of GT. +In particular, we first show that if we consider +one type of linear transformer, the so-called Per- +former/Linear Transformer (Choromanski et al., +2020; Katharopoulos et al., 2020b), then MPNN ++ VN with only O(1) depth and O(1) width +can approximate a self-attention layer in Per- +former/Linear Transformer. Next, via a connec- +tion between MPNN + VN and DeepSets, we +prove the MPNN + VN with O(nd) width and +O(1) depth can approximate the self-attention +layer arbitrarily well, where d is the input fea- +ture dimension. Lastly, under some assumptions, +we provide an explicit construction of MPNN + +VN with O(1) width and O(n) depth approxi- +mating the self-attention layer in GT arbitrarily +well. 
On the empirical side, we demonstrate that +1) MPNN + VN is a surprisingly strong baseline, +outperforming GT on the recently proposed Long +Range Graph Benchmark (LRGB) dataset, 2) our +MPNN + VN improves over early implementation +on a wide range of OGB datasets and 3) MPNN + +VN outperforms Linear Transformer and MPNN +on the climate modeling task. +1University of California San Diego, San Diego, USA. Corre- +spondence to: Chen Cai . +Copyright 2023 by the author(s). +VN +Transformer +(a) +(b) +Figure 1: MPNN + VN and Graph Transformers. +1. Introduction +MPNN (Message Passing Neural Network) (Gilmer et al., +2017) has been the leading architecture for processing graph- +structured data. Recently, transformers in natural language +processing (Vaswani et al., 2017; Kalyan et al., 2021) and +vision (d’Ascoli et al., 2021; Han et al., 2022) have extended +their success to the domain of graphs. There have been +several pieces of work (Ying et al., 2021; Wu et al., 2021; +Kreuzer et al., 2021; Rampášek et al., 2022; Kim et al., 2022) +showing that with careful position embedding (Lim et al., +2022), graph transformers (GT) can achieve compelling +empirical performances on large-scale datasets and start to +challenge the dominance of MPNN. +MPNN imposes a sparsity pattern on the computation graph +and therefore enjoys linear complexity. It however suffers +from well-known over-smoothing (Li et al., 2018; Oono +& Suzuki, 2019; Cai & Wang, 2020) and over-squashing +(Alon & Yahav, 2020; Topping et al., 2021) issues, limiting +its usage on long-range modeling tasks where the label of +one node depends on features of nodes far away. GT relies +purely on position embedding to encode the graph structure +and uses vanilla transformers on top. 1 It models all pairwise +interactions directly in one layer, making it computationally +more expensive. Compared to MPNN, GT shows promising +results on tasks where modeling long-range interaction is +the key, but the quadratic complexity of self-attention in GT +1GT in this paper refers to the practice of tokenizing graph +nodes and applying standard transformers on top (Ying et al., 2021; +Kim et al., 2022). There exists a more sophisticated GT (Kreuzer +et al., 2021) that further conditions attention on edge types but it is +not considered in this paper. +arXiv:2301.11956v1 [cs.LG] 27 Jan 2023 + +On the Connection Between MPNN and Graph Transformer +limits its usage to graphs of medium size. Scaling up GT +to large graphs remains an active research area (Wu et al., +2022). +Theoretically, it has been shown that graph transformers can +be powerful graph learners (Kim et al., 2022), i.e., graph +transformers with appropriate choice of token embeddings +have the capacity of approximating linear permutation equiv- +ariant basis, and therefore can approximate 2-IGN (Invariant +Graph Network), a powerful architecture that is at least as +expressive as MPNN (Maron et al., 2018). This raises an +important question that whether GT is strictly more powerful +than MPNN. Can we approximate GT with MPNN? +One common intuition of the advantage of GT over MPNN +is its ability to model long-range interaction more effectively. +However, from the MPNN side, one can resort to a simple +trick to escape locality constraints for effective long-range +modeling: the use of an additional virtual node (VN) that +connects to all input graph nodes. 
On a high level, MPNN ++ VN augments the existing graph with one virtual node, +which acts like global memory for every node exchanging +messages with other nodes. Empirically this simple trick has +been observed to improve the MPNN and has been widely +adopted (Gilmer et al., 2017; Hu et al., 2020; 2021) since +the early beginning of MPNN (Gilmer et al., 2017; Battaglia +et al., 2018). However, there is very little theoretical study +of MPNN + VN (Hwang et al., 2022). +In this work, we study the theoretical property of MPNN ++ VN, and its connection to GT. We systematically study +the representation power of MPNN + VN, both for certain +approximate self-attention and for the full self-attention +layer, and provide a depth-width trade-off, summarized in +Table 1. In particular, +• With O(1) depth and O(1) width, MPNN + VN +can approximate one self-attention layer of Performer +(Choromanski et al., 2020) and Linear Transformer +(Katharopoulos et al., 2020b), a type of linear trans- +formers (Tay et al., 2020). +• Via a link between MPNN + VN with DeepSets (Za- +heer et al., 2017), we prove MPNN + VN with O(1) +depth and O(nd) width (d is the input feature dimen- +sion) is permutation equivariant universal, implying +it can approximate self-attention layer and even full- +transformers. +• Under certain assumptions on node features, we prove +an explicit construction of O(n) depth O(1) width +MPNN + VN approximating 1 self-attention layer ar- +bitrarily well on graphs of size n. Unfortunately, the +assumptions on node features are rather strong, and +whether we can alleviate them will be an interesting +future direction to explore. +• Empirically, we show 1) that MPNN + VN works sur- +prisingly well on the recently proposed LRGB (long- +range graph benchmarks) datasets (Dwivedi et al., +2022), which arguably require long-range interaction +reasoning to achieve strong performance 2) our imple- +mentation of MPNN + VN is able to further improve +the early implementation of MPNN + VN on OGB +datasets and 3) MPNN + VN outperforms Linear Trans- +former (Katharopoulos et al., 2020b) and MPNN on +the climate modeling task. +2. Related Work +Virtual node in MPNN. The virtual node augments the +graph with an additional node to facilitate the information +exchange among all pairs of nodes. It is a heuristic proposed +in (Gilmer et al., 2017) and has been observed to improve +the performance in different tasks (Hu et al., 2021; 2020). +Surprisingly, its theoretical properties have received little +study. To the best of our knowledge, only a recent paper +(Hwang et al., 2022) analyzed the role of the virtual node in +the link prediction setting in terms of 1) expressiveness of +the learned link representation and 2) the potential impact +on under-reaching and over-smoothing. +Graph transformer. +Because of the great successes +of Transformers in natural language processing (NLP) +(Vaswani et al., 2017; Wolf et al., 2020) and recently in +computer vision (Dosovitskiy et al., 2020; d’Ascoli et al., +2021; Liu et al., 2021), there is great interest in extending +transformers for graphs. One common belief of advantage +of graph transformer over MPNN is its capacity in capturing +long-range interactions while alleviating over-smoothing (Li +et al., 2018; Oono & Suzuki, 2019; Cai & Wang, 2020) and +over-squashing in MPNN (Alon & Yahav, 2020; Topping +et al., 2021). 
+Fully-connected Graph transformer (Dwivedi & Bresson, +2020) was introduced with eigenvectors of graph Laplacian +as the node positional encoding (PE). Various follow-up +works proposed different ways of PE to improve GT, ranging +from an invariant aggregation of Laplacian?s eigenvectors +in SAN (Kreuzer et al., 2021), pair-wise graph distances in +Graphormer (Ying et al., 2021), relative PE derived from dif- +fusion kernels in GraphiT (Mialon et al., 2021), and recently +Sign and Basis Net (Lim et al., 2022) with a principled way +of handling sign and basis invariance. Other lines of re- +search in GT include combining MPNN and GT (Wu et al., +2021; Rampášek et al., 2022), encoding the substructures +(Chen et al., 2022), and efficient graph transformers for +large graphs (Wu et al., 2022). + +On the Connection Between MPNN and Graph Transformer +Table 1: Summary of approximation result of MPNN + VN on self-attention layer. n is the number of nodes and d is the +feature dimension of node features. The dependency on d is hidden. +Depth +Width +Self-Attention +Note +Theorem 4.1 +O(1) +O(1) +Approximate +Approximate self attention in Performer (Choromanski et al., 2020) +Theorem 5.5 +O(1) +O(nd) +Full +Leverage the universality of equivariant DeepSets +Theorem 6.3 +O(n) +O(1) +Full +Explicit construction, strong assumption on X +Proposition B.10 +O(n) +O(1) +Full +Explicit construction, more relaxed (but still strong) assumption on X +3. Preliminaries +We denote X ∈ Rn×d the concatenation of graph node +features and positional encodings, where node i has feature +xi ∈ Rd. When necessary, we use x(l) +j +to denote the node +j’s feature at depth l. Let M be the space of multisets of +vectors in Rd. We use X ⊆ Rn×d to denote the space of +node features and the Xi be the projection of X on i-th +coordinate. ∥ · ∥ denotes the 2-norm. [x, y, z] denotes the +concatenation of x, y, z. [n] stands for the set {1, 2, ..., n}. +Definition 3.1 (attention). We denote key and query matrix +as WK, WQ ∈ Rd×d′, and value matrix as WV ∈ Rd×d +2. Attention score between two vectors u, v ∈ Rd×1 is de- +fined as α(u, v) = softmax(uT WQ(WK)T v). We denote +A as the space of attention α for different WQ, WK, WV . +We also define unnormalized attention score α′(·, ·) to be +α′(u, v) = uT WQ(WK)T v. Self attention layer is a ma- +trix function L : Rn×d → Rn×d of the following form: +L(X) = softmax(XWQ(XWK)T )XWV . +3.1. MPNN Layer +Definition 3.2 (MPNN layer (Gilmer et al., 2017)). An +MPNN layer on a graph G with node features x(k) at k-th +layer and edge features e is of the following form +x(k) +i += γ(k) � +x(k−1) +i +, τj∈N (i)φ(k) � +x(k−1) +i +, x(k−1) +j +, ej,i +�� +Here γ : Rd × Rd′ → Rd is update function, φ : Rd × +Rd × Rde → Rd′ is message function where de is the edge +feature dimension, τ : M → Rd is permutation invariant +aggregation function and N(i) is the neighbors of node i +in G. Update/message/aggregation functions are usually +parametrized by neural networks. For graphs of different +types of edges and nodes, one can further extend MPNN to +the heterogeneous setting. We use 1, ..., n to index graph +nodes and vn to denote the virtual node. +Definition 3.3 (heterogeneous MPNN + VN layer). The +heterogeneous MPNN + VN layer operates on two types +2For simplicity, we assume the output dimension of self- +attention is the same as the input dimension. All theoretical results +can be extended to the case where the output dimension is different +from d. 
+of nodes: 1) virtual node and 2) graph nodes, denoted as +vn and gn, and three types of edges: 1) vn-gn edge and 2) +gn-gn edges and 3) gn-vn edges. It has the following form +x(k) +vn = γ(k) +vn +� +x(k−1) +i +, τj∈[n]φ(k) +vn-gn +� +x(k−1) +i +, x(k−1) +j +, ej,i +�� +(1) +for the virtual node, and +x(k) +i += γ(k) +gn (x(k−1) +i +, τj∈N1(i)φ(k) +gn-vn +� +x(k−1) +i +, x(k−1) +j +, ej,i +� ++ τj∈N2(i)φ(k) +gn-gn +� +x(k−1) +i +, x(k−1) +j +, ej,i) +� +(2) +for graph node. Here N1(i) for graph node i is the virtual +node and N2(i) is the set of neighboring graph nodes. +Our proof of approximating self-attention layer L with +MPNN layers does not use the graph topology. Next, we +introduce a simplified heterogeneous MPNN + VN layer, +which will be used in the proof. It is easy to see that set- +ting φ(k) +gn-gn to be 0 in Definition 3.3 recovers the simplified +heterogeneous MPNN + VN layer. +Definition 3.4 (simplified heterogeneous MPNN + VN +layer). A simplified heterogeneous MPNN + VN layer is +the same as a heterogeneous MPNN + VN layer in Defini- +tion 3.3 except we set θgn-gn to be 0. I.e., we have +x(k) +vn = γ(k) +vn +� +x(k−1) +i +, τj∈[n]φ(k) +vn-gn +� +x(k−1) +i +, x(k−1) +j +, ej,i +�� +for the virtual node, and +x(k) +i += γ(k) +gn +� +x(k−1) +i +, τj∈N1(i)φ(k) +gn-vn +� +x(k−1) +i +, x(k−1) +j +, ej,i +�� +for graph nodes. +Intuitively, adding the virtual node (VN) to MPNN makes it +easy to compute certain quantities, for example, the mean +of node features (which is hard for standard MPNN unless +the depth is proportional to the diameter of the graph). Us- +ing VN thus makes it easy to implement for example the +mean subtraction, which helps reduce over-smoothing and +improves the performance of GNN. (Yang et al., 2020; Zhao +& Akoglu, 2019) + +On the Connection Between MPNN and Graph Transformer +3.2. Assumptions +We have two mild assumptions on feature space X ⊂ Rn×d +and the regularity of target function L. +AS1. ∀i ∈ [n], xi ∈ Xi, ∥xi∥ < C1. This implies X is +compact. +AS2. ∥WQ∥ < C2, ∥WK∥ < C2, ∥WV ∥ < C2 for target +layer L. Combined with AS1 on X, this means α′(xi, xj) +is both upper and lower bounded, which further implies +� +j eα′(xi,xj) be both upper bounded and lower bounded. +4. O(1)-depth O(1)-width MPNN + VN for +unbiased approximation of attention +The standard self-attention takes O(n2) computational time, +therefore not scalable for large graphs. Reducing the compu- +tational complexity of self-attention in Transformer is active +research (Tay et al., 2020). In this section, we consider +self-attention in a specific type of efficient transformers, Per- +former (Choromanski et al., 2020) and Linear Transformer +(Katharopoulos et al., 2020b). +One full self-attention layer L is of the following form +x(l+1) +i += +n +� +j=1 +κ +� +W (l) +Q x(l) +i , W (l) +K x(l) +j +� +�n +k=1 κ +� +W (l) +Q x(l) +i , W (l) +K x(l) +k +�· +� +W (l) +V x(l) +j +� +(3) +where κ +: +Rd × Rd +→ +R is the softmax kernel +κ(x, y) := exp(xT y). The kernel function can be ap- +proximated via κ(x, y) = ⟨Φ(x), Φ(y)⟩V ≈ φ(x)T φ(y) +where the first equation is by Mercer’s theorem and +φ(·) : Rd → Rm is a low-dimensional feature map +with random transformation. For Performer (Choroman- +ski et al., 2020), the choice of φ is taken as φ(x) = +exp +� +−∥x∥2 +2 +2 +� +√m +� +exp +� +wT +1 x +� +, · · · , exp +� +wT +mx +�� +where wk ∼ +N (0, Id) is i.i.d sampled random variable. For Linear Trans- +former (Katharopoulos et al., 2020b), φ(x) = elu(x) + 1. 
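To make Definition 3.4 concrete, below is a minimal PyTorch sketch of one simplified heterogeneous MPNN + VN layer. The mean aggregation, the two-layer MLPs used for the message and update functions, and the hidden width are illustrative assumptions (the definition leaves these parametrizations generic); the VN is updated first and its new state is then broadcast back to the graph nodes, which is how the construction in Section 4 uses the layer. With such a layer the VN can, for instance, carry the mean of the node features back to every node, matching the mean-subtraction intuition mentioned above.

```python
import torch
import torch.nn as nn

class SimplifiedMPNNVNLayer(nn.Module):
    """One simplified heterogeneous MPNN + VN layer (Definition 3.4):
    the virtual node aggregates messages from every graph node, then each
    graph node is updated from the virtual node alone (its only neighbor
    in N1(i)); gn-gn messages are dropped (phi_gn-gn = 0)."""

    def __init__(self, d, hidden=64):
        super().__init__()
        mlp = lambda d_in, d_out: nn.Sequential(
            nn.Linear(d_in, hidden), nn.ReLU(), nn.Linear(hidden, d_out))
        self.phi_vn_gn = mlp(2 * d, d)   # message: graph node -> VN
        self.gamma_vn = mlp(2 * d, d)    # VN update
        self.phi_gn_vn = mlp(2 * d, d)   # message: VN -> graph node
        self.gamma_gn = mlp(2 * d, d)    # graph-node update

    def forward(self, x, x_vn):          # x: (n, d) graph nodes, x_vn: (d,) virtual node
        n = x.size(0)
        vn_rep = x_vn.unsqueeze(0).expand(n, -1)
        # VN step: mean aggregation tau over messages from all graph nodes
        agg = self.phi_vn_gn(torch.cat([vn_rep, x], dim=-1)).mean(dim=0)
        x_vn = self.gamma_vn(torch.cat([x_vn, agg], dim=-1))
        # graph-node step: each node only receives the (updated) VN feature
        msg = self.phi_gn_vn(torch.cat([x, x_vn.unsqueeze(0).expand(n, -1)], dim=-1))
        return self.gamma_gn(torch.cat([x, msg], dim=-1)), x_vn

x, x_vn = torch.randn(10, 16), torch.zeros(16)
x_new, x_vn_new = SimplifiedMPNNVNLayer(d=16)(x, x_vn)   # shapes (10, 16) and (16,)
```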
+By switching κ(x, y) to be φ(x)T φ(y), and denote qi = +W (l) +Q x(l) +i , ki = W (l) +K x(l) +i +and vi = W (l) +V x(l) +i , the approx- +imated version of Equation (3) by Performer and Linear +Transformer becomes +x(l+1) +i += +n +� +j=1 +φ (qi)T φ (kj) +�n +k=1 φ (qi)T φ (kk) +· vj += +� +φ (qi)T �n +j=1 φ (kj) ⊗ vj +�T +φ (qi)T �n +k=1 φ (kk) +. +(4) +where we use the matrix multiplication association rule to +derive the second equality. +The key advantage of Equation (4) is that �n +j=1 φ (kj) and +�n +j=1 φ(kj) ⊗ vj can be approximated by the virtual node, +and shared for all graph nodes, using only O(1) layers of +MPNNs. We denote the self-attention layer of this form +in Equation (4) as LPerformer. Linear Transformer differs +from Performer by choosing a different form of φ(x) = +Relu(x) + 1 in its self-attention layer LLinear-Transformer. +In particular, the VN will approximate �n +j=1 φ (kj) and +�n +j=1 φ (kj) ⊗ vj, and represent it as its feature. Both +φ (kj) and φ (kj)⊗vj can be approximated arbitrarily well +by an MLP with constant width (constant in n but can be +exponential in d) and depth. Note that φ(kj) ⊗ vj ∈ Rdm +but can be reshaped to 1 dimensional feature vector. +More specifically, the initial feature for the virtual node is +1(d+1)m, where d is the dimension of node features and m +is the number of random projections ωi. Message function ++ aggregation function for virtual node τφvn-gn : R(d+1)m × +M → R(d+1)m is +τj∈[n]φ(k) +vn-gn(·, {xi}i) = [ +n +� +j=1 +φ (kj) , +ReshapeTo1D( +n +� +j=1 +φ (kj) ⊗ vj)] +(5) +where ReshapeTo1D(·) flattens a 2D matrix to a 1D vec- +tor in raster order. This function can be arbitrarily approxi- +mated by MLP. Note that the virtual node’s feature dimen- +sion is (d + 1)m (where recall m is the dimension of the +feature map φ used in the linear transformer/Performer), +which is larger than the dimension of the graph node +d. This is consistent with the early intuition that the vir- +tual node might be overloaded when passing information +among nodes. The update function for virtual node γvn : +R(d+1)m × R(d+1)m → R(d+1)m is just coping the second +argument, which can be exactly implemented by MLP. +VN then sends its message back to all other nodes, where +each graph node i applies the update function γgn +: +R(d+1)m × Rd → Rd of the form +γgn(xi, [ +n +� +j=1 +φ (kj) , ReshapeTo1D( +n +� +j=1 +φ (kj) ⊗ vj)]) += +� +φ (qi) �n +j=1 φ (kj) ⊗ vj +�T +φ (qi)T �n +k=1 φ (kk) +(6) +to update the graph node feature. +As the update function γgn can not be computed exactly in +MLP, what is left is to show that error induced by using +MLP to approximate τφvn-gn and γgn in Equation (5) and +Equation (6) can be made arbitrarily small. +Theorem 4.1. Under the AS1 and AS2, MPNN + VN of +O(1) width and O(1) depth can approximate LPerformer and +LLinear-Transformer arbitrarily well. + +On the Connection Between MPNN and Graph Transformer +Proof. We first prove the case of LPerformer. We can decom- +pose our target function as the composition of τj∈[n]φ(k) +vn-gn, +γgn and φ. By the uniform continuity of the functions, +it suffices to show that 1) we can approximate φ, 2) we +can approximate operations in γgn and τφvn-gn arbitrar- +ily well on the compact domain, and 3) the denominator +φ (qi)T �n +k=1 φ (kk) is uniformly lower bounded by a pos- +itive number for any node features in X. 
+For 1), each component of φ is continuous and all inputs +kj, qj lie in the compact domain so φ can be approximated +arbitrarily well by MLP with O(1) width and O(1) depth +(Cybenko, 1989). +For 2), we need to approximate the operations in γgn and +τφvn-gn, i.e., approximate multiplication, and vector-scalar +division arbitrarily well. As all those operations are con- +tinuous, it boils down to showing that all operands lie +in a compact domain. By assumption AS1 and AS2 on +WQ, WK, WV and input feature X, we know that qi, ki, vi +lies in a compact domain for all graph nodes i. As φ is con- +tinuous, this implies that φ(qi), �n +j=1 φ(kj) ⊗ vj lies in a +compact domain (n is fixed), therefore the numerator lies +in a compact domain. Lastly, since all operations do not +involve n, the depth and width are constant in n. +For 3), it is easy to see that φ (qi)T �n +k=1 φ (kk) is always +positive. We just need to show that the denominator is bound +from below by a positive constant. For Performer, φ(x) = +exp +� +−∥x∥2 +2 +2 +� +√m +� +exp +� +wT +1 x +� +, · · · , exp +� +wT +mx +�� +where wk ∼ +N (0, Id). As all norm of input x to φ is upper bounded +by AS1, exp( −∥x∥2 +2 +2 +) is lower bounded. As m is fixed, +we know that ∥wT +i x∥ ≤ ∥wi∥∥x∥, which implies that +wT +i x is lower bounded by −∥wi∥∥x∥ which further im- +plies that exp(wT +i x) is lower bounded. This means that +φ (qi)T �n +k=1 φ (kk) is lower bounded. +For Linear Transformer, the proof is essentially the same +as above. We only need to show that φ(x) = elu(x) + 1 is +continuous and positive, which is indeed the case. +Besides Performers, there are many other different ways of +obtaining linear complexity. In Appendix C.2, we discuss +the limitation of MPNN + VN on approximating other types +of efficient transformers such as Linformer (Wang et al., +2020b) and Sparse Transformer (Child et al., 2019). +5. O(1) depth O(nd) width MPNN + VN +We have shown that the MPNN + VN can approximate self- +attention in Performer and Linear Transformer using only +O(1) depth and O(1) width. One may naturally wonder +whether MPNN + VN can approximate the self-attention +layer in the full transformer. In this section, we show that +MPNN + VN with O(1) depth (number of layers), but with +O(nd) width, can approximate 1 self-attention layer (and +full transformer) arbitrarily well. +The main observation is that MPNN + VN is able to ex- +actly simulate (not just approximate) equivariant DeepSets +(Zaheer et al., 2017), which is proved to be universal in +approximating any permutation invariant/equivariant maps +(Zaheer et al., 2017; Segol & Lipman, 2019). Since the +self-attention layer is permutation equivariant, this implies +that MPNN + VN can approximate the self-attention layer +(and full transformer) with O(1) depth and O(nd) width fol- +lowing a result on DeepSets from Segol & Lipman (2019). +We first introduce the permutation equivariant map, equiv- +ariant DeepSets, and permutation equivariant universality. +Definition 5.1 (permutation equivariant map). A map F : +Rn×k → Rn×l satisfying F (σ · X) = σ · F (X) for all +σ ∈ Sn and X ∈ Rn×d is called permutation equivariant. +Definition 5.2 (equivariant DeepSets of Zaheer et al. +(2017)). Equivariant DeepSets has the following form +F (X) = Lds +m◦ν◦· · ·◦ν◦Lds +1 (X), where Lds +i is a linear per- +mutation equivariant layer and ν is a nonlinear layer such as +ReLU. 
The linear permutation equivariant layer in DeepSets +has the following form Lds +i (X) = XA+ 1 +n11T XB+1cT , +where A, B ∈ Rdi×di+1, c ∈ Rdi+1 is the weights and bias +in layer i, and ν is ReLU. +Definition 5.3 (permutation equivariant universality). +Given a compact domain X of Rn×din, permutation equiv- +ariant universality of a model F : Rn×din → Rn×dout means +that for every permutation equivariant continuous function +H : Rn×din → Rn×dout defined over X, and any ϵ > 0, +there exists a choice of m (i.e., network depth), di (i.e., net- +work width at layer i) and the trainable parameters of F so +that ∥H(X) − F (X)∥∞ < ϵ for all X ∈ X. +The universality of equivariant DeepSets is stated as follows. +Theorem 5.4 (Segol & Lipman (2019)). DeepSets with con- +stant layer is universal. Using ReLU activation the width +ω := maxidi (di is the width for i-th layer of DeepSets) +required for universal permutation equivariant network sat- +isfies ω ≤ dout + din + +� n + din +din +� += O(ndin). +We are now ready to state our main theorem. +Theorem 5.5. MPNN + VN can simulate (not just approx- +imate) equivariant DeepSets: Rn×d → Rn×d. The depth +and width of MPNN + VN needed to simulate DeepSets is up +to a constant factor of the depth and width of DeepSets. This +implies that MPNN + VN of O(1) depth and O(nd) width +is permutation equivariant universal, and can approximate +self-attention layer and transformers arbitrarily well. +Proof. Equivariant DeepSets has the following form +F (X) = Lds +m ◦ ν ◦ · · · ◦ ν ◦ Lds +1 (X), where Lds +i is the + +On the Connection Between MPNN and Graph Transformer +Table 2: Baselines for Peptides-func (graph classification) and Peptides-struct (graph regression). The perfor- +mance metric is Average Precision (AP) for classification and MAE for regression. Bold: Best score. +Model +# Params. +Peptides-func +Peptides-struct +Test AP before VN +Test AP after VN ↑ Test MAE before VN Test MAE after VN ↓ +GCN +508k +0.5930±0.0023 +0.6623±0.0038 +0.3496±0.0013 +0.2488±0.0021 +GINE +476k +0.5498±0.0079 +0.6346±0.0071 +0.3547±0.0045 +0.2584±0.0011 +GatedGCN +509k +0.5864±0.0077 +0.6635±0.0024 +0.3420±0.0013 +0.2523±0.0016 +GatedGCN+RWSE +506k +0.6069±0.0035 +0.6685±0.0062 +0.3357±0.0006 +0.2529±0.0009 +Transformer+LapPE +488k +0.6326±0.0126 +- +0.2529±0.0016 +- +SAN+LapPE +493k +0.6384±0.0121 +- +0.2683±0.0043 +- +SAN+RWSE +500k +0.6439±0.0075 +- +0.2545±0.0012 +- +linear permutation equivariant layer and ν is an entrywise +nonlinear activation layer. Recall that the linear equivariant +layer has the form Lds +i (X) = XA+ 1 +n11T XB +1cT . As +one can use the same nonlinear entrywise activation layer ν +in MPNN + VN, it suffices to prove that MPNN + VN can +compute linear permutation equivariant layer Lds. Now we +show that 2 layers of MPNN + VN can exactly simulate any +given linear permutation equivariant layer Lds. +Specifically, at layer 0, we initialized the node features as +follows: The VN node feature is set to 0, while the node +feature for the i-th graph node is set up as xi ∈ Rd. +At layer 1: VN node feature is 1 +n11T X, average of node +features. The collection of features over n graph node fea- +ture is XA. We only need to transform graph node features +by a linear transformation, and set the VN feature as the +average of graph node features in the last iteration. Both +can be exactly implemented in Definition 3.4 of simplified +heterogeneous MPNN + VN. +At layer 2: VN node feature is set to be 0, and the graph node +feature is XA + 1 +n11T XB + 1cT . 
Here we only need to +perform the matrix multiplication of the VN feature with B, +as well as add a bias c. This can be done by implementing a +linear function for γgn. +It is easy to see the width required for MPNN + VN to +simulate DeepSets is constant. Thus, one can use 2 layers +of MPNN + VN to compute linear permutation equivariant +layer Lds +i , which implies that MPNN + VN can simulate +1 layer of DeepSets exactly with constant depth and con- +stant width (independent of n). Then by the universality of +DeepSets, stated in Theorem 5.4, we conclude that MPNN + +VN is also permutation equivariant universal, which implies +that the constant layer of MPNN + VN with O(nd) width +is able to approximate any continuous equivariant maps. +As the self-attention layer L and full transformer are both +continuous and equivariant, they can be approximated by +MPNN + VN arbitrarily well. +Thanks to the connection between MPNN + VN with +DeepSets, there is no extra assumption on X except for +being compact. The drawback on the other hand is that the +upper bound on the computational complexity needed to +approximate the self-attention with wide MPNN + VN is +worse than directly computing self-attention when d > 2. +6. O(n) depth O(1) width MPNN + VN +The previous section shows that we can approximate a full at- +tention layer in Transformer using MPNN with O(1) depth +but O(nd) width where n is the number of nodes and d is the +dimension of node features. In practice, it is not desirable +to have the width depend on the graph size. +In this section, we hope to study MPNN + VNs with O(1) +width and their ability to approximate a self-attention layer +in the Transformer. However, this appears to be much more +challenging. Our result in this section only shows that for +a rather restrictive family of input graphs (see Assumption +3 below), we can approximate a full self-attention layer +of transformer with an MPNN + VN of O(1) width and +O(n) depth. We leave the question of MPNN + VN’s ability +in approximate transformers for more general families of +graphs for future investigation. +We first introduce the notion of (V , δ) separable node fea- +tures. This is needed to ensure that VN can approximately +select one node feature to process at each iteration with +attention αvn, the self-attention in the virtual node. +Definition 6.1 ((V , δ) separable by ¯α). Given a graph G +of size n and a fixed V ∈ Rn×d = [v1, ..., vn] and ¯α ∈ A, +we say node feature X ∈ Rn×d of G is (V , δ) separable +by some ¯α if the following holds. For any node feature xi, +there exist weights W ¯α +K, W ¯α +Q in attention score ¯α such that +¯α(xi, vi) > maxj̸=i ¯α(xj, vi) + δ. We say set X is (V , δ) +separable by ¯α if every element X ∈ X is (V , δ) separable +by ¯α. +The use of (V , δ) separability is to approximate hard se- +lection function arbitrarily well, which is stated below and +proved in Appendix B.1. +Lemma 6.2 (approximate hard selection). Given X is +(V , δ) separable by ¯α for some fixed V ∈ Rn×d, ¯α ∈ A + +On the Connection Between MPNN and Graph Transformer +Table 3: Test performance in graph-level OGB benchmarks (Hu et al., 2020). Shown is the mean ± s.d. of 10 runs. +Model +ogbg-molhiv +ogbg-molpcba +ogbg-ppa +ogbg-code2 +AUROC ↑ +Avg. 
Precision ↑ +Accuracy ↑ +F1 score ↑ +GCN +0.7606 ± 0.0097 +0.2020 ± 0.0024 +0.6839 ± 0.0084 +0.1507 ± 0.0018 +GCN+virtual node +0.7599 ± 0.0119 +0.2424 ± 0.0034 +0.6857 ± 0.0061 +0.1595 ± 0.0018 +GIN +0.7558 ± 0.0140 +0.2266 ± 0.0028 +0.6892 ± 0.0100 +0.1495 ± 0.0023 +GIN+virtual node +0.7707 ± 0.0149 +0.2703 ± 0.0023 +0.7037 ± 0.0107 +0.1581 ± 0.0026 +SAN +0.7785 ± 0.2470 +0.2765 ± 0.0042 +– +– +GraphTrans (GCN-Virtual) +– +0.2761 ± 0.0029 +– +0.1830 ± 0.0024 +K-Subtree SAT +– +– +0.7522 ± 0.0056 +0.1937 ± 0.0028 +GPS +0.7880 ± 0.0101 +0.2907 ± 0.0028 +0.8015 ± 0.0033 +0.1894 ± 0.0024 +MPNN + VN + NoPE +0.7676 ± 0.0172 +0.2823 ± 0.0026 +0.8055 ± 0.0038 +0.1727 ± 0.0017 +MPNN + VN + PE +0.7687 ± 0.0136 +0.2848 ± 0.0026 +0.8027 ± 0.0026 +0.1719 ± 0.0013 +and δ > 0, the following holds. For any ϵ > 0 and i ∈ [n], +there exists a set of attention weights Wi,Q, Wi,K in i-th +layer of MPNN + VN such that αvn(xi, vi) > 1 − ϵ for +any xi ∈ Xi. In other words, we can approximate a hard +selection function fi(x1, ..., xn) = xi arbitrarily well on +X by setting αvn = ¯α. +With the notation set up, We now state an extra assumption +needed for deep MPNN + VN case and the main theorem. +AS3. X is (V , δ) separable by ¯α for some fixed V ∈ Rn×d, +¯α ∈ A and δ > 0. +Theorem 6.3. Assume AS 1-3 hold for the compact set X +and L. Given any graph G of size n with node features X ∈ +X, and a self-attention layer L on G (fix WK, WQ, WV +in α), there exists a O(n) layer of heterogeneous MPNN ++ VN with the specific aggregate/update/message function +that can approximate L on X arbitrarily well. +The proof is presented in the Appendix B. On the high level, +we can design an MPNN + VN where the i-th layer will +select ˜xi, an approximation of xi via attention mechanism, +enabled by Lemma 6.2, and send ˜xi to the virtual node. +Virtual node will then pass the ˜xi to all graph nodes and +computes the approximation of eα(xi,xj), ∀j ∈ [n]. Repeat +such procedures n times for all graph nodes, and finally, use +the last layer for attention normalization. A slight relaxation +of AS3 is also provided in the appendix. +7. Experiments +7.1. MPNN + VN for LRGB Datasets +We experiment with MPNN + VN for Long Range Graph +Benchmark (LRGB) datasets. Original paper (Dwivedi +et al., 2022) observes that GT outperforms MPNN on +4 out of 5 datasets, among which GT shows signifi- +cant improvement over MPNN on Peptides-func and +Peptides-struct for all MPNNs. To test the effec- +tiveness of the virtual node, we take the original code and +modify the graph topology by adding a virtual node and +keeping the hyperparameters of all models unchanged. +Results are in Table 2. +Interestingly, such a simple +change can boost MPNN + VN by a large margin on +Peptides-func and Peptides-struct. Notably, +with the addition of VN, GatedGCN + RWSE (random-walk +structural encoding) after augmented by VN outperforms +all transformers on Peptides-func, and GCN outper- +forms transformers on Peptides-struct. +7.2. Stronger MPNN + VN Implementation +Next, by leveraging the modularized implementation from +GraphGPS (Rampášek et al., 2022), we implemented a ver- +sion of MPNN + VN with/without extra positional embed- +ding. Our goal is not to achieve SOTA but instead to push +the limit of MPNN + VN and better understand the source +of the performance gain for GT. In particular, we replace +the GlobalAttention Module in GraphGPS with DeepSets, +which is equivalent to one specific version of MPNN + VN. 
+We tested this specific version of MPNN + VN on 4 OGB +datasets, both with and without the use of positional em- +bedding. The results are reported in Table 3. Interestingly, +even without the extra position embedding, our MPNN + +VN is able to further improve over the previous GCN + +VN & GIN + VN implementation. The improvement on +ogbg-ppa is particularly impressive, which is from 0.7037 +to 0.8055. Furthermore, it is important to note that while +MPNN + VN does not necessarily outperform GraphGPS, +which is a state-of-the-art architecture using both MPNN, +Position/structure encoding and Transformer, the difference +is quite small – this however, is achieved by a simple MPNN ++ VN architecture. +We also test MPNN + VN on large-scale molecule datasets +PCQMv2, which has 529,434 molecule graphs. We fol- +lowed (Rampášek et al., 2022) and used the original vali- +dation set as the test set, while we left out random 150K +molecules for our validation set. As we can see from Table 4, +MPNN + VN + NoPE performs significantly better than the +early MPNN + VN implementation: GIN + VN and GCN + + +On the Connection Between MPNN and Graph Transformer +Table 4: Evaluation on PCQM4Mv2 (Hu et al., 2021) dataset. For GPS evaluation, we treated the validation set of the +dataset as a test set, since the test-dev set labels are private. +Model +PCQM4Mv2 +Test-dev MAE ↓ +Validation MAE ↓ +Training MAE +# Param. +GCN +0.1398 +0.1379 +n/a +2.0M +GCN-virtual +0.1152 +0.1153 +n/a +4.9M +GIN +0.1218 +0.1195 +n/a +3.8M +GIN-virtual +0.1084 +0.1083 +n/a +6.7M +GRPE (Park et al., 2022) +0.0898 +0.0890 +n/a +46.2M +EGT (Hussain et al., 2022) +0.0872 +0.0869 +n/a +89.3M +Graphormer (Shi et al., 2022) +n/a +0.0864 +0.0348 +48.3M +GPS-small +n/a +0.0938 +0.0653 +6.2M +GPS-medium +n/a +0.0858 +0.0726 +19.4M +MPNN + VN + PE (small) +n/a +0.0942 +0.0617 +5.2M +MPNN + VN + PE (medium) +n/a +0.0867 +0.0703 +16.4M +MPNN + VN + NoPE (small) +n/a +0.0967 +0.0576 +5.2M +MPNN + VN + NoPE (medium) +n/a +0.0889 +0.0693 +16.4M +VN. The performance gap between GPS on the other hand is +rather small: 0.0938 (GPS) vs. 0.0942 (MPNN + VN + PE) +for the small model and 0.0858 (GPS) vs. 0.0867 (MPNN + +VN + PE) for the medium model. +7.3. Forecasting Sea Surface Temperature +In this experiment, we apply our MPNN + VN model to +forecast sea surface temperature (SST). We are particularly +interested in the empirical comparison between MPNN + +VN and Linear Transformer (Katharopoulos et al., 2020a) +as according to Section 4, MPNN + VN theoretically can +approximate Linear Transformer. +In particular, from the DOISST data proposed by (Huang +et al., 2021), we construct a dataset of daily SST in the +Pacific Ocean from 1982 to 2021, in the region of lon- +gitudes from 180.125◦E to 269.875◦E and latitudes from +−14.875◦N to 14.875◦N. Following the procedure from +(de Bezenac et al., 2018; de Bézenac et al., 2019) and Wang +et al. (2022), we divide the region into 11 batches of equal +size with 30 longitudes and 30 latitudes at 0.5◦-degree reso- +lution, that can be represented as a graph of 900 nodes. The +tasks are to predict the next 4 weeks, 2 weeks and 1 week +of SST at each location, given 6 weeks of historical data. +We train on data from years 1982–2018, validate on data +from 2019 and test on data from 2020–2021. The number of +training, validation, and testing examples are roughly 150K, +3K, and 7K. See details of dataset construction, model ar- +chitectures, and training scheme in Appendix D.4. 
+We compare our model to other baselines including TF- +Net (Wang et al., 2020a), a SOTA method for spatiotempo- +ral forecasting, Linear Transformer (Katharopoulos et al., +2020a; Wang et al., 2020b) with Laplacian positional en- +coding (LapPE), and Multilayer Perceptron (MLP). We use +Mean Square Error (MSE) as the metric and report the er- +rors on the test set, shown in the Table 5. We observe that +the virtual node (VN) alone improves upon MPNN by 3.8%, +6.6% and 4.5% in 4-, 2- and 1-week settings, respectively. +Table 5: Results of SST prediction. +Model +4 weeks +2 weeks +1 week +MLP +0.3302 +0.2710 +0.2121 +TF-Net +0.2833 +0.2036 +0.1462 +Linear Transformer + LapPE +0.2818 +0.2191 +0.1610 +MPNN +0.2917 +0.2281 +0.1613 +MPNN + VN +0.2806 +0.2130 +0.1540 +Furthermore, aligned with our theory in Section 4, MPNN + +VN indeed achieves comparable results with Linear Trans- +former and outperforms it by a margin of 0.4%, 2.8% and +4.3% in 4-, 2- and 1-week settings, respectively. +8. Concluding Remarks +In this paper, we study the expressive power of MPNN + +VN under the lens of GT. If we target the self-attention +layer in Performer and Linear Transformer, one only needs +O(1)-depth O(1) width for arbitrary approximation error. +For self-attention in full transformer, we prove that hetero- +geneous MPNN + VN of either O(1) depth O(nd) width or +O(n) depth O(1) width (under some assumptions) can ap- +proximate 1 self-attention layer arbitrarily well. Compared +to early results (Kim et al., 2022) showing GT can approx- +imate MPNN, our theoretical result draws the connection +from the inverse direction. +On the empirical side, we demonstrate that MPNN + VN +remains a surprisingly strong baseline. Despite recent ef- +forts, we still lack good benchmark datasets where GT can +outperform MPNN by a large margin. Understanding the +inductive bias of MPNN and GT remains challenging. For +example, can we mathematically characterize tasks that re- +quire effective long-range interaction modeling, and provide +a theoretical justification for using GT over MPNN (or vice +versa) for certain classes of functions on the space of graphs? +We believe making processes towards answering such ques- +tions is an important future direction for the graph learning +community. + +On the Connection Between MPNN and Graph Transformer +References +Alon, U. and Yahav, E. On the bottleneck of graph neural +networks and its practical implications. arXiv preprint +arXiv:2006.05205, 2020. +Battaglia, P. W., Hamrick, J. B., Bapst, V., Sanchez- +Gonzalez, A., Zambaldi, V., Malinowski, M., Tacchetti, +A., Raposo, D., Santoro, A., Faulkner, R., et al. Rela- +tional inductive biases, deep learning, and graph networks. +arXiv preprint arXiv:1806.01261, 2018. +Brody, S., Alon, U., and Yahav, E. How attentive are graph +attention networks? arXiv preprint arXiv:2105.14491, +2021. +Cai, C. and Wang, Y. A note on over-smoothing for graph +neural networks. arXiv preprint arXiv:2006.13318, 2020. +Chen, D., O’Bray, L., and Borgwardt, K. Structure-aware +transformer for graph representation learning. In Interna- +tional Conference on Machine Learning, pp. 3469–3489. +PMLR, 2022. +Child, R., Gray, S., Radford, A., and Sutskever, I. Gen- +erating long sequences with sparse transformers. arXiv +preprint arXiv:1904.10509, 2019. +Choromanski, K., Likhosherstov, V., Dohan, D., Song, X., +Gane, A., Sarlos, T., Hawkins, P., Davis, J., Mohiuddin, +A., Kaiser, L., et al. Rethinking attention with performers. 
+arXiv preprint arXiv:2009.14794, 2020. +Cybenko, G. Approximation by superpositions of a sig- +moidal function. Mathematics of control, signals and +systems, 2(4):303–314, 1989. +de Bezenac, E., Pajot, A., and Gallinari, P. Deep learn- +ing for physical processes: Incorporating prior scientific +knowledge. In International Conference on Learning +Representations, 2018. URL https://openreview. +net/forum?id=By4HsfWAZ. +de Bézenac, E., Pajot, A., and Gallinari, P. Deep learn- +ing for physical processes: incorporating prior scien- +tific knowledge. Journal of Statistical Mechanics: The- +ory and Experiment, 2019(12):124009, dec 2019. doi: +10.1088/1742-5468/ab3195. URL https://dx.doi. +org/10.1088/1742-5468/ab3195. +Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, +D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., +Heigold, G., Gelly, S., et al. An image is worth 16x16 +words: Transformers for image recognition at scale. arXiv +preprint arXiv:2010.11929, 2020. +Dwivedi, V. P. and Bresson, X. +A generalization +of transformer networks to graphs. +arXiv preprint +arXiv:2012.09699, 2020. +Dwivedi, V. P., Rampášek, L., Galkin, M., Parviz, A., Wolf, +G., Luu, A. T., and Beaini, D. Long range graph bench- +mark. arXiv preprint arXiv:2206.08164, 2022. +d’Ascoli, S., Touvron, H., Leavitt, M. L., Morcos, A. S., +Biroli, G., and Sagun, L. Convit: Improving vision trans- +formers with soft convolutional inductive biases. In In- +ternational Conference on Machine Learning, pp. 2286– +2296. PMLR, 2021. +Gilmer, J., Schoenholz, S. S., Riley, P. F., Vinyals, O., and +Dahl, G. E. Neural message passing for quantum chem- +istry. In International conference on machine learning, +pp. 1263–1272. PMLR, 2017. +Han, K., Wang, Y., Chen, H., Chen, X., Guo, J., Liu, Z., +Tang, Y., Xiao, A., Xu, C., Xu, Y., et al. A survey on +vision transformer. IEEE transactions on pattern analysis +and machine intelligence, 2022. +Hu, W., Fey, M., Zitnik, M., Dong, Y., Ren, H., Liu, B., +Catasta, M., and Leskovec, J. Open graph benchmark: +Datasets for machine learning on graphs. Advances in +neural information processing systems, 33:22118–22133, +2020. +Hu, W., Fey, M., Ren, H., Nakata, M., Dong, Y., +and Leskovec, J. +Ogb-lsc: A large-scale challenge +for machine learning on graphs. +arXiv preprint +arXiv:2103.09430, 2021. +Huang, B., Liu, C., Banzon, V., Freeman, E., Gra- +ham, +G., +Hankins, +B., +Smith, +T., +and Zhang, +H.-M. +Improvements of the daily optimum inter- +polation sea surface temperature (doisst) version +2.1. +Journal of Climate, 34(8):2923 – 2939, 2021. +doi: +10.1175/JCLI-D-20-0166.1. +URL https: +//journals.ametsoc.org/view/journals/ +clim/34/8/JCLI-D-20-0166.1.xml. +Hussain, M. S., Zaki, M. J., and Subramanian, D. Global +self-attention as a replacement for graph convolution. In +Proceedings of the 28th ACM SIGKDD Conference on +Knowledge Discovery and Data Mining, pp. 655–665, +2022. +Hwang, E., Thost, V., Dasgupta, S. S., and Ma, T. An +analysis of virtual nodes in graph neural networks for link +prediction. In Learning on Graphs Conference, 2022. +Kalyan, K. S., Rajasekharan, A., and Sangeetha, S. Am- +mus: A survey of transformer-based pretrained mod- +els in natural language processing. +arXiv preprint +arXiv:2108.05542, 2021. +Katharopoulos, A., Vyas, A., Pappas, N., and Fleuret, F. +Transformers are rnns: Fast autoregressive transformers +with linear attention. 
In Proceedings of the International + +On the Connection Between MPNN and Graph Transformer +Conference on Machine Learning (ICML), 2020a. URL +https://arxiv.org/abs/2006.16236. +Katharopoulos, A., Vyas, A., Pappas, N., and Fleuret, F. +Transformers are rnns: Fast autoregressive transformers +with linear attention. In International Conference on +Machine Learning, pp. 5156–5165. PMLR, 2020b. +Kim, J., Nguyen, T. D., Min, S., Cho, S., Lee, M., Lee, +H., and Hong, S. Pure transformers are powerful graph +learners. arXiv preprint arXiv:2207.02505, 2022. +Kingma, D. and Ba, J. Adam: A method for stochastic +optimization. International Conference on Learning Rep- +resentations, 12 2014. +Kreuzer, D., Beaini, D., Hamilton, W., Létourneau, V., and +Tossou, P. Rethinking graph transformers with spectral +attention. Advances in Neural Information Processing +Systems, 34:21618–21629, 2021. +Li, Q., Han, Z., and Wu, X.-M. Deeper insights into graph +convolutional networks for semi-supervised learning. In +Thirty-Second AAAI conference on artificial intelligence, +2018. +Lim, D., Robinson, J., Zhao, L., Smidt, T., Sra, S., Maron, +H., and Jegelka, S. Sign and basis invariant networks +for spectral graph representation learning. arXiv preprint +arXiv:2202.13013, 2022. +Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, +S., and Guo, B. Swin transformer: Hierarchical vision +transformer using shifted windows. In Proceedings of the +IEEE/CVF International Conference on Computer Vision, +pp. 10012–10022, 2021. +Maron, H., Ben-Hamu, H., Shamir, N., and Lipman, Y. +Invariant and equivariant graph networks. arXiv preprint +arXiv:1812.09902, 2018. +Mialon, G., Chen, D., Selosse, M., and Mairal, J. Graphit: +Encoding graph structure in transformers. arXiv preprint +arXiv:2106.05667, 2021. +Oono, K. and Suzuki, T. Graph neural networks exponen- +tially lose expressive power for node classification. arXiv +preprint arXiv:1905.10947, 2019. +Park, W., Chang, W.-G., Lee, D., Kim, J., et al. Grpe: +Relative positional encoding for graph transformer. In +ICLR2022 Machine Learning for Drug Discovery, 2022. +Rampášek, L., Galkin, M., Dwivedi, V. P., Luu, A. T., Wolf, +G., and Beaini, D. Recipe for a general, powerful, scal- +able graph transformer. arXiv preprint arXiv:2205.12454, +2022. +Reynolds, R. W., Smith, T. M., Liu, C., Chelton, D. B., +Casey, K. S., and Schlax, M. G. Daily high-resolution +blended analyses for sea surface temperature. J. Climate, +20:5473–5496, 2007. +Santoro, A., Raposo, D., Barrett, D. G., Malinowski, M., +Pascanu, R., Battaglia, P., and Lillicrap, T. A simple +neural network module for relational reasoning. Advances +in neural information processing systems, 30, 2017. +Segol, N. and Lipman, Y. On universal equivariant set +networks. arXiv preprint arXiv:1910.02421, 2019. +Shi, Y., Zheng, S., Ke, G., Shen, Y., You, J., He, J., +Luo, S., Liu, C., He, D., and Liu, T.-Y. Benchmarking +graphormer on large-scale molecular modeling datasets. +arXiv preprint arXiv:2203.04810, 2022. +Tay, Y., Dehghani, M., Bahri, D., and Metzler, D. Effi- +cient transformers: A survey. ACM Computing Surveys +(CSUR), 2020. +Topping, J., Di Giovanni, F., Chamberlain, B. P., Dong, +X., and Bronstein, M. M. Understanding over-squashing +and bottlenecks on graphs via curvature. arXiv preprint +arXiv:2111.14522, 2021. +Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, +L., Gomez, A. N., Kaiser, Ł., and Polosukhin, I. At- +tention is all you need. 
Advances in neural information +processing systems, 30, 2017. +Veliˇckovi´c, P., Cucurull, G., Casanova, A., Romero, A., +Lio, P., and Bengio, Y. Graph attention networks. arXiv +preprint arXiv:1710.10903, 2017. +Wang, R., Kashinath, K., Mustafa, M., Albert, A., and Yu, +R. Towards physics-informed deep learning for turbulent +flow prediction. pp. 1457–1466, 08 2020a. doi: 10.1145/ +3394486.3403198. +Wang, R., Walters, R., and Yu, R. Meta-learning dynamics +forecasting using task inference. In Oh, A. H., Agarwal, +A., Belgrave, D., and Cho, K. (eds.), Advances in Neural +Information Processing Systems, 2022. URL https: +//openreview.net/forum?id=BsSP7pZGFQO. +Wang, S., Li, B. Z., Khabsa, M., Fang, H., and Ma, H. +Linformer: Self-attention with linear complexity. arXiv +preprint arXiv:2006.04768, 2020b. +Wolf, T., Debut, L., Sanh, V., Chaumond, J., Delangue, C., +Moi, A., Cistac, P., Rault, T., Louf, R., Funtowicz, M., +et al. Transformers: State-of-the-art natural language +processing. In Proceedings of the 2020 conference on em- +pirical methods in natural language processing: system +demonstrations, pp. 38–45, 2020. + +On the Connection Between MPNN and Graph Transformer +Wu, Q., Zhao, W., Li, Z., Wipf, D., and Yan, J. Nodeformer: +A scalable graph structure learning transformer for node +classification. In Advances in Neural Information Pro- +cessing Systems, 2022. +Wu, Z., Jain, P., Wright, M., Mirhoseini, A., Gonzalez, +J. E., and Stoica, I. Representing long-range context for +graph neural networks with global attention. Advances +in Neural Information Processing Systems, 34:13266– +13279, 2021. +Yang, C., Wang, R., Yao, S., Liu, S., and Abdelzaher, T. +Revisiting over-smoothing in deep gcns. arXiv preprint +arXiv:2003.13663, 2020. +Ying, C., Cai, T., Luo, S., Zheng, S., Ke, G., He, D., Shen, Y., +and Liu, T.-Y. Do transformers really perform badly for +graph representation? Advances in Neural Information +Processing Systems, 34:28877–28888, 2021. +Zaheer, M., Kottur, S., Ravanbakhsh, S., Poczos, B., +Salakhutdinov, R. R., and Smola, A. J. Deep sets. Ad- +vances in neural information processing systems, 30, +2017. +Zhao, L. and Akoglu, L. Pairnorm: Tackling oversmoothing +in gnns. arXiv preprint arXiv:1909.12223, 2019. +Zweig, A. and Bruna, J. Exponential separations in symmet- +ric neural networks. arXiv preprint arXiv:2206.01266, +2022. + +On the Connection Between MPNN and Graph Transformer +A. Notations +We provide a notation table for references. +Table 6: Summary of important notations. +Symbol +Meaning +X ∈ X ⊂ Rn×d +graph node features +xi ∈ R1×d +graph node i’s feature +˜xi ∈ R1×d +approximated graph node i’s feature via attention selection +M +A multiset of vectors in Rd +W (l) +Q , W (l) +K , W (l) +V +∈ Rd×d′ +attention matrix of l-th self-attention layer in graph transformer +X +feature space +Xi +projection of feature space onto i-th coordinate +Lds +i +i-th linear permutation equivariant layer in DeepSets +L, L′ +full self attention layer; approximate self attention layer in Performer +z(l) +vn , z(l) +i +virtual/graph node feature at layer l of heterogeneous MPNN + VN +αvn +attention score in MPNN + VN +α(·, ·) +normalized attention score +αGATv2(·, ·) +normalized attention score with GATv2 +α′(·, ·) +unnormalized attention score. α′(u, v) = uWQ(WK)T vT +α′ +GATv2(·, ·) +unnormalized attention score with GATv2. 
α′ +GATv2(u, v) := aT LeakyReLU (W · [u∥v] + b) +A +space of attentions, where each element α ∈ A is of form α(u, v) = softmax(uWQ(WK)T vT ) +C1 +upper bound on norm of all node features ∥xi∥ +C2 +upper bound on the norm of WQ, WK, WV in target L +C3 +upper bound on the norm of attention weights of αvn when selecting xi +γ(k)(·, ·) +update function +θ(k)(·, ·) +message function +τ(·) +aggregation function +B. O(n) Heterogeneous MPNN + VN Layer with O(1) Width Can Approximate 1 Self Attention +Layer Arbitrarily Well +B.1. Assumptions +Definition B.1 ((V , δ) separable by ¯α). Given a graph G of size n and a fixed V ∈ Rn×d = [v1, ..., vn] and ¯α ∈ A, we +say node feature X ∈ Rn×d of G is (V , δ) separable by some ¯α if the following holds. For any node feature xi, there exist +weights W ¯α +K, W ¯α +Q in attention score ¯α such that ¯α(xi, vi) > maxj̸=i ¯α(xj, vi) + δ. We say set X is (V , δ) separable by ¯α +if every element X ∈ X is (V , δ) separable by ¯α. +A special case of (V , δ) separable is when δ = 0, i.e., ∀i, ¯α(xi, vi) > maxj̸=i ¯α(xj, vi). We provide a geometric +characterization of X being (V , 0) separable. +Lemma B.2. Given ¯α and V , X is (V , 0) separable by ¯α ⇐⇒ xi is not in the convex hull spanned by {xj}j̸=i. ⇐⇒ there +are no points in the convex hull of {xi}i∈[n]. +Proof. The second equivalence is trivial so we only prove the first equivalence. By definition, X is (V , 0) separable by ¯α +⇐⇒ ¯α(xi, vi) > maxj̸=i ¯α(xj, vi)∀i ∈ [n] ⇐⇒ ⟨xi, W ¯α +QW ¯α,T +K +vi⟩ > maxj̸=i⟨xj, W ¯α +QW ¯α,T +K +vi⟩∀i ∈ [n]. +By denoting the v′ +i := W ¯α +QW ¯α,T +K +vi ∈ Rd, we know that ⟨xi, v′ +i⟩ > maxj̸=i⟨xj, v′ +i⟩∀i ∈ [n], which implies that +∀i ∈ [n], xi can be linearly seprated from {xj}j̸=i ⇐⇒ xi is not in the convex hull spanned by {xj}j̸=i, which concludes +the proof. +Lemma B.3 (approximate hard selection). Given X is (V , δ) separable by ¯α for some fixed V ∈ Rn×d, ¯α ∈ A and +δ > 0, the following holds. For any ϵ > 0 and i ∈ [n], there exists a set of attention weights Wi,Q, Wi,K in i-th layer of + +On the Connection Between MPNN and Graph Transformer +MPNN + VN such that αvn(xi, vi) > 1 − ϵ for any xi ∈ Xi. In other words, we can approximate a hard selection function +fi(x1, ..., xn) = xi arbitrarily well on X by setting αvn = ¯α. +Proof. Denote ¯α′ as the unnormalized ¯α. As X is (V , δ) separable by ¯α, by definition we know that ¯α(xi, vi) > +maxj̸=i ¯α(xj, vi) + δ holds for any i ∈ [n] and xi ∈ M. We can amplify this by multiple the weight matrix in ¯α by a +constant factor c to make ¯α′(xi, vi) > maxj̸=i ¯α′(xj, vi) + cδ. This implies that e¯α′(xi,vi) > ecδ maxj̸=i e¯α′(xj,vi). This +means after softmax, the attention score ¯α(xi, vi) will be at least +ecδ +ecδ+n−1. We can pick a large enough c(δ, ϵ) such that +¯α(xi, vi) > 1 − ϵ for any xi ∈ Xi and ϵ > 0. +Proof Intuition and Outline. On the high level, i-th MPNN + VN layer will select ˜xi, an approximation i-th node feature +xi via attention mechanism, enabled by Lemma 6.2, and send ˜xi to the virtual node. Virtual node will then pass the ˜xi to all +graph nodes and computes the approximation of eα(xi,xj), ∀j ∈ [n]. Repeat such procedures n times for all graph nodes, +and finally, use the last layer for attention normalization. 
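Before turning to the formal argument, the construction can be summarised in a short sketch. The code below is our own illustration rather than anything implemented in the paper: it replaces the approximate attention-based selection by exact hard indexing, so that iteration k plays the role of the k-th heterogeneous MPNN + VN layer (the virtual node broadcasts x_k, every graph node accumulates the unnormalised contribution e^{α′(x_i, x_k)} x_k W_V together with the partial sum of scores, and a final step performs the attention normalisation).

import numpy as np

def self_attention(X, WQ, WK, WV):
    # Reference single-head softmax self-attention with the unnormalised
    # score alpha'(x_i, x_j) = x_i WQ (WK)^T x_j^T used in the paper's notation.
    A = np.exp(X @ WQ @ WK.T @ X.T)
    return (A / A.sum(axis=1, keepdims=True)) @ X @ WV

def mpnn_vn_simulation(X, WQ, WK, WV):
    # n virtual-node rounds followed by one normalisation step.
    n, _ = X.shape
    tmp = np.zeros((n, WV.shape[1]))      # running sum of e^{alpha'} * (x_k WV)
    partial = np.zeros((n, 1))            # running sum of e^{alpha'}
    for k in range(n):
        xk = X[k]                         # hard selection; the proof uses soft selection
        score = np.exp(X @ WQ @ WK.T @ xk)
        tmp += score[:, None] * (xk @ WV)
        partial += score[:, None]
    return tmp / partial                  # final layer: attention normalisation

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 4))
WQ, WK, WV = (rng.normal(size=(4, 4)) for _ in range(3))
print(np.allclose(self_attention(X, WQ, WK, WV), mpnn_vn_simulation(X, WQ, WK, WV)))

Replacing the hard indexing X[k] by the attention-based soft selection of Lemma B.3 is exactly what introduces the approximation error analysed in Appendix B.4.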
+The main challenge of the proof is to 1) come up with message/update/aggregation functions for heterogeneous MPNN ++ VN layer, which is shown in Appendix B.2, and 2) ensure the approximation error, both from approximating Aggre- +gate/Message/Update function with MLP and the noisy input, can be well controlled, which is proved in Appendix B.4. +We will first instantiate the Aggregate/Message/Update function for virtual/graph nodes in Appendix B.2, and prove that +each component can be either exactly computed or approximated to an arbitrary degree by MLP. Then we go through an +example in Appendix B.3 of approximate self-attention layer L with O(n) MPNN + VN layers. The main proof is presented +in Appendix B.4, where we show that the approximation error introduced during different steps is well controlled. Lastly, in +Appendix B.5 we show assumption on node features can be relaxed if a more powerful attention mechanism GATv2 (Brody +et al., 2021) is allowed in MPNN + VN. +B.2. Aggregate/Message/Update Functions +Let M be a multiset of vectors in Rd. The specific form of Aggregate/Message/Update for virtual and graph nodes are listed +below. Note that ideal forms will be implemented as MLP, which will incur an approximation error that can be controlled to +an arbitrary degree. We use z(k) +vn denotes the virtual node’s feature at l-th layer, and z(k) +i +denotes the graph node i’s node +feature. Iteration index k starts with 0 and the node index starts with 1. +B.2.1. VIRTUAL NODE +At k-th iteration, virtual node i’s feature z(k) +i +is a concatenation of three component [˜xi, vk+1, 0] where the first component +is the approximately selected node features xi ∈ Rd, the second component is the vi ∈ Rd that is used to select the node +feature in i-th iteration. The last component is just a placeholder to ensure the dimension of the virtual node and graph node +are the same. It is introduced to simplify notation. +Initial feature is z(0) +vn = [0d, v1, 0]. +Message function + Aggregation function τj∈[n]φ(k) +vn-gn : R2d+1 × M → R2d+1 has two cases to discuss depending on value +of k. For k = 1, 2, ..., n, +τj∈[n]φ(k) +vn-gn(z(k−1) +vn +, {z(k−1) +i +}i) = +�� +i αvn(z(k−1) +vn +, z(k−1) +i +)z(k−1) +i +k = 1, 2, ..., n +12d+1 +k = n + 1, n + 2 +(7) +where z(k−1) +vn += [˜xk−1, vk, 0]. z(k−1) +i += [ +2d+1 dim +� +�� +� +xi +���� +d dim +, ..., ...] is the node i’s feature, where the first d coordinates remain fixed for +different iteration k. τj∈[n]φ(k) +vn-gn use attention αvn to approximately select k-th node feature [ +2d+1 dim +� +�� +� +xk +���� +d dim +, ..., ...]. Note that the +particular form of attention αvn needed for soft selection is not important as long as we can approximate hard selection + +On the Connection Between MPNN and Graph Transformer +arbitrarily well. As the z(k−1) +vn +contains vk and z(k−1) +i +contains xi (see definition of graph node feature in Appendix B.2.2), +this step can be made as close to hard selection as possible, according to Lemma B.7. +In the case of k = n + 1, τj∈[n]φ(k) +vn-gn : R2d+1 +� �� � +vn +× M +���� +set of gn +→ Rd simply returns 12d+1. This can be exactly implemented by +an MLP. +Update function γ(k) +vn +: R2d+1 +� �� � +vn +× R2d+1 +� �� � +gn +→ R2d+1: Given the virtual node’s feature in the last iteration, and the selected +feature in virtual node y = [xk, ..., ...] 
with αvn, +γ(k) +vn (·, y) = +� +� +� +� +� +[y0:d, vk+1, 0] +k = 1, ..., n − 1 +[y0:d, 0d, 0] +k = n +12d+1 +k = n + 1, n + 2 +(8) +where y0:d denotes the first d channels of y ∈ R2d+1. y denotes the selected node zi’s feature in Message/Aggregation +function. γ(k) +vn can be exactly implemented by an MLP for any k = 1, ..., n + 2. +B.2.2. GRAPH NODE +Graph node i’s feature vi ∈ R2d+1 can be thought of as a concatenation of three components [ xi +���� +d dim +, tmp +���� +d dim +, partialsum +� +�� +� +1 dim +], +where xi, ∈ Rd, tmp ∈ Rd 3, and partialsum ∈ R. +In particular, xi is the initial node feature. The first d channel will stay the same until the layer n + 2. tmp = +� +j∈subset of[n] eα′ +ijxj stands for the unnormalized attention contribution up to the current iteration. partialsum ∈ R +is a partial sum of the unnormalized attention score, which will be used for normalization in the n + 2-th iteration. +Initial feature z(0) +gn = [xi, 0d, 0]. +Message function + Aggregate function: τj∈[n]φ(k) +gn-vn : R2d+1 × R2d+1 → R2d+1 is just “copying the second argument” +since there is just one incoming message from the virtual node, i.e., τj∈[n]φ(k) +gn-vn(x, {y}) = y. This function can be exactly +implemented by an MLP. +Update function γ(k) +gn : R2d+1 +� �� � +gn +× R2d+1 +� �� � +vn +→ R2d+1 is of the following form. +γ(k) +gn ([x, tmp, partialsum], y) = +� +� +� +� +� +� +� +� +� +[x, tmp, partialsum] +k = 1 +[x, tmp + eα′(x,y0:d)WV y0:d, +partialsum + eα′(x,y0:d)] +k = 2, ..., n + 1 +[ +tmp +partialsum, 0d, 0] +k = n + 2 +(9) +where α′(x, y0:d) is the usual unnormalized attention score. Update function γ(k) +gn can be arbitrarily approximated by an +MLP, which is proved below. +Lemma B.4. Update function γ(k) +gn can be arbitrarily approximated by an MLP from R2d+1 × R2d+1 to R2d+1 for all +k = 1, ..., n + 2. +Proof. We will show that for any k = 1, ..., n + 2, the target function γ(k) +gn : R2d+1 × R2d+1 → R2d+1 is continuous and +the domain is compact. By the universality of MLP in approximating continuous function on the compact domain, we know +γ(k) +gn can be approximated to arbitrary precision by an MLP. +3tmp technicially denotes the dimension of projected feature by WV and does not has to be in Rd. We use Rd here to reduce the +notation clutter. + +On the Connection Between MPNN and Graph Transformer +Recall that +γ(k) +gn ([x, tmp, partialsum], y) = +� +� +� +� +� +� +� +� +� +[x, tmp, partialsum] +k = 1 +[x, tmp + eα′(x,y0:d)WV y0:d, +partialsum + eα′(x,y0:d)] +k = 2, ..., n + 1 +[ +tmp +partialsum, 0d, 0] +k = n + 2 +it is easy to see that k = 1, γ(1) +gn is continuous. We next show for k = 2, ..., n + 2, γ(1) +gn is also continuous and all arguments +lie in a compact domain. +γ(k) +gn +is continuous because to a) α′(x, y) is continuous b) scalar-vector multiplication, sum, and exponential are all +continuous. Next, we show that four component x, tmp, partialsum, y0:d all lies in a compact domain. +x is the initial node features, and by AS1 their norm is bounded so x is in a compact domain. +tmp is an approximation of eα′ +i,1WV x1 + eα′ +i,2WV x2 + .... As α′(xi, xj) is both upper and lower bounded by AS2 for all +i, j ∈ [n] and xi is bounded by AS1, eα′ +i,1WV x1 + eα′ +i,2WV x2 + ... is also bounded from below and above. tmp will also +be bounded as we can control the error to any precision. +partialsum is an approximation of eα′ +i,1 + eα′ +i,2 + .... For the same reason as the case above, partialsum is also bounded +both below and above. 
+y0:d will be ˜xi at i-th iteration so it will also be bounded by AS1. +Therefore we conclude the proof. +B.3. A Running Example +We provide an example to illustrate how node features are updated in each iteration. +Time 0: All nodes are initialized as indicated in Appendix B.2. Virtual node feature z(0) +vn = [0d, v1, 0]. Graph node feature +z(0) +i += [xi, 0d, 0] for all i ∈ [n]. +Time 1: +For virtual node, according to the definition of τj∈[n]φ(1) +vn-gn in Equation (7), it will pick an approximation of x1, i.e. ˜x1. +Note that the approximation error can be made arbitrarily small. VN’s node feature z(1) +vn = [˜x1, v2, 0]. +For i-th graph node feature, z(0) +vn = 1d, and z(0) +i += [xi, 0d, 0]. According to γ(k) +gn in Equation (9), z(1) +i += [xi, 0d, 0]. +Time 2: +For the virtual node feature: similar to the analysis in time 1, VN’s feature z(2) +vn = [˜x2, v3, 0] now. Note that the weights and +bias in τj∈[n]φ(2) +vn-gn will be different from those in τj∈[n]φ(1) +vn-gn. +For i-th graph node feature, as z(1) +vn += [˜x1, v2, 0] and z(1) +i += [xi, 0d, 0], according to γ(k) +gn +in Equation (9), z(2) +i += +[xi, e +� +α′ +i,1WV ˜x1, e +� +α′ +i,1]. Here � +α′ +i,1 := α′(xi, ˜x1). We will use similar notations in later iterations. 4 +Time 3: +Similar to the analysis above, z(3) +vn = [� +x3, v4, 0]. +z(3) +i += [xi, e +� +α′ +i,1WV ˜x1 + e +� +α′ +i,2WV ˜x2, e +� +α′ +i,1 + e +� +α′ +i,2]. +Time n: +z(n) +vn = [˜xn, 0d, 0]. +4To reduce the notation clutter and provide an intuition of the proof, we omit the approximation error introduced by using MLP to +approximate aggregation/message/update function, and assume the aggregation/message/update can be exactly implemented by neural +networks. In the proofs, approximation error by MLP is handled rigorously. + +On the Connection Between MPNN and Graph Transformer +z(n) +i += xi, e +� +α′ +i,1WV ˜x1 + ... + e +� +α′ +i,n−1WV � +xn−1 +� +�� +� +n−1 terms +, +e +� +α′ +i,1 + e +� +α′ +i,2 + ... + e +� +α′ +i,n−1] +� +�� +� +n−1 terms +. +Time n + 1: +According to Appendix B.2.1, in n + 1 iteration, the virtual node’s feature will be 1d. +z(n+1) +i += [xi, � +k∈[n] e +� +α′ +ikWV ˜xk, � +k∈[n] e +� +α′ +ik] +Time n + 2 (final layer): +For the virtual node, its node feature will stay the same. +For the graph node feature, the last layer will serve as a normalization of the attention score (use MLP to approximate vector- +scalar multiplication), and set the last channel to be 0 (projection), resulting in an approximation of [xi, +� +k∈[n] e +� +α′ +ik WV ˜xk +� +k∈[n] e +� +α′ +ik +, 0]. +Finally, we need one more linear transformation to make the node feature become [ +� +k∈[n] e +� +α′ +ik WV ˜xk +� +k∈[n] e +� +α′ +ik +, 0d, 0]. The first d +channel is an approximation of the output of the self-attention layer for node i where the approximation error can be made +as small as possible. This is proved in Appendix B, and we conclude that heterogeneous MPNN + VN can approximate the +self-attention layer L to arbitrary precision with O(n) MPNN layers. +B.4. Controlling Error +On the high level, there are three major sources of approximation error: 1) approximate hard selection with self-attention and +2) approximate equation γ(k) +gn with MLPs, and 3) attention normalization in the last layer. In all cases, we aim to approximate +the output of a continuous map Lc(x). However, our input is usually not exact x but an approximation of ˜x. We also cannot +access the original map Lc but instead, an MLP approximation of Lc, denoted as LMLP. 
The following lemma allows to +control the difference between Lc(x) and LMLP(˜x). +Lemma B.5. Let Lc be a continuous map from compact set to compact set in Euclidean space. Let LMLP be the ap- +proximation of Lc by MLP. If we can control ∥x − ˜x∥ to an arbitrarily small degree, we can then control the error +∥Lc(x) − LMLP(˜x)∥ arbitrarily small. +Proof. By triangle inequality ∥Lc(x) − LMLP(˜x)∥ ≤ ∥Lc(x) − LMLP(x))∥ + ∥LMLP(x) − LMLP(˜x)∥. +For the first term ∥Lc(˜x)−LMLP(˜x)∥, by the universality of MLP, we can control the error ∥Lc(˜x)−LMLP(˜x)∥ in arbitrary +degree. +For the second term ∥LMLP(x) − LMLP(˜x)∥, as LMLP is continuous on a compact domain, it is uniformly continuous by +Heine-Cantor theorem. This means that we can control the ∥LMLP(x) − LMLP(˜x)∥ as long as we can control ∥x − ˜x∥, +independent from different x. By assumption, this is indeed the case so we conclude the proof. +Remark B.6. The implication is that when we are trying to approximate the output of a continuous map Lc on the compact +domain by an MLP LMLP, it suffices to show the input is 1) ∥Lc − LMLP∥∞ and 2) ∥˜x − x∥ can be made arbitrarily small. +The first point is usually done by the universality of MLP on the compact domain (Cybenko, 1989). The second point needs +to be shown case by case. +In the Appendix B.3, to simplify the notations we omit the error introduced by using MLP to approximate aggrega- +tion/message/update functions (continuous functions on the compact domain of Rd.) in MPNN + VN. Lemma B.5 justify +such reasoning. +Lemma B.7 (˜xi approximates xi. � +α′ +i,j approximates α′ +i,j.). For any ϵ > 0 and x ∈ X, there exist a set of weights for +message/aggregate functions of the virtual node such that ||xi − ˜xi|| < ϵ and |α′ +i,j − � +α′ +i,j| < ϵ. + +On the Connection Between MPNN and Graph Transformer +Proof. By Lemma 6.2 We know that � +αi,j := �α(xi, xj) → δ(i − j) as C3(ϵ) goes to infinity. Therefore we have +||˜xi − xi|| = || +� +j +� +αi,jxj − xi|| = || +� +(�αi,j − δ(i − j))xj|| < ϵ +� +||xj|| < nC1ϵ +(10) +As n and C1 are fixed, we can make the upper bound as small as we want by increasing C3. +|α′ +i,j−� +α′ +i,j| = |α′(xi, xj)−α′ +MLP(˜xi, xj)| = |α′(xi, xj)−α′(˜xi, xj)|+|α′(˜xi, xj)−α′ +MLP(˜xi, xj)| = |α′(xi−˜xi, xj)| = +(xi − ˜xi)T xjC2 +2 + ϵ < nC1ϵC1C2 +2 + ϵ = (nC2 +1C2 +2 + 1)ϵ. As α′ +i,j, � +α′ +i,j is bounded from above and below, it’s easy to see +that |eα′ +i,j − e +� +α′ +i,j| = |eα′ +i,j(1 − eα′ +i,j− � +α′ +i,j)| < C(1 − eα′ +i,j− � +α′ +i,j) can be controlled to arbitrarily degree. +Theorem 6.3. Assume AS 1-3 hold for the compact set X and L. Given any graph G of size n with node features X ∈ X, +and a self-attention layer L on G (fix WK, WQ, WV in α), there exists a O(n) layer of heterogeneous MPNN + VN with +the specific aggregate/update/message function that can approximate L on X arbitrarily well. +Proof. i-th MPNN + VN layer will select ˜xi, an arbitrary approximation i-th node feature xi via attention mechanism. This +is detailed in the message/aggregation function of the virtual node in Appendix B.2.1. Assuming the regularity condition on +feature space X, detailed in AS3, the approximation error can be made as small as needed, as shown in Lemmas 6.2 and B.7. +Virtual node will then pass the ˜xi to all graph nodes, which computes an approximation of eα′(˜xi,xj), ∀j ∈ [n]. This step +is detailed in the update function γ(k) +gn of graph nodes, which can also be approximated arbitrarily well by MLP, proved +in Lemma B.4. 
By Lemma B.5, we have an arbitrary approximation of eα′(˜xi,xj), ∀j ∈ [n], which itself is an arbitrary +approximation of eα′(xi,xj), ∀j ∈ [n]. +Repeat such procedures n times for all graph nodes, we have an arbitrary approximation of � +k∈[n] eα′ +ikWV xk ∈ Rd and +� +k∈[n] eα′ +ik ∈ R. Finally, we use the last layer to approximate attention normalization Lc(x, y) = x +y , where x ∈ Rd, y ∈ R. +As inputs for attention normalization are arbitrary approximation of � +k∈[n] eα′ +ikWV xk and � +k∈[n] eα′ +ik, both of them +are lower/upper bounded according to AS1 and AS2. Since the denominator is upper bounded by a positive number, this +implies that the target function Lc is continuous in both arguments. By evoking Lemma B.5 again, we conclude that we can +approximate its output +� +k∈[n] eα′ +ik WV xk +� +k∈[n] eα′ +ik +arbitrarily well. This concludes the proof. +B.5. Relaxing Assumptions with More Powerful Attention +One limitation of Theorem 6.3 are assumptions on node features space X: we need to 1) restrict the variability of node +feature so that we can select one node feature to process each iteration. 2) The space of the node feature also need to satisfy +certain configuration in order for VN to select it. For 2), we now consider a different attention function for αvn in MPNN + +VN that can relax the assumptions AS3 on X. +More powerful attention mechanism. From proof of Theorem 6.3, we just need α(·, ·) uniformly select every node in +X ∈ X. The unnormalized bilinear attention α′ is weak in the sense that f(·) = ⟨xiWQW T +K, ·⟩ has a linear level set. Such +a constraint can be relaxed via an improved attention module GATv2. Observing the ranking of the attention scores given by +GAT (Veliˇckovi´c et al., 2017) is unconditioned on the query node, Brody et al. (2021) proposed GATv2, a more expressive +attention mechanism. In particular, the unnormalized attention score α′ +GATv2(u, v) := aT LeakyReLU (W · [u∥v] + b), +where [·||·] is concatenation. We will let αvn = αGATv2 to select features in τj∈[n]φ(k) +vn-gn. +Lemma B.8. α′ +GATv2(·, ·) can approximate any continuous function from Rd × Rd → R. For any v ∈ Rd, a restriction of +α′ +GATv2(·, v) can approximate any continuous function from Rd → R. +Proof. Any function continuous in both arguments of α′ +GATv2 is also continuous in the concatenation of both arguments. As +any continuous functions in R2d can be approximated by α′ +GATv2 on a compact domain according to the universality of MLP +(Cybenko, 1989), we finish the proof for the first statement. + +On the Connection Between MPNN and Graph Transformer +(a) +(b) +Figure 2: In the left figure, we have one example of X being (V , δ) separable, for which α can uniformly select any point +(marked as red) xi ∈ Xi. In the right figure, we change αvn in MPNN + VN to αGATv2, which allows us to select more +diverse feature configurations. The cluster in the middle cannot be selected by any α ∈ A but can be selected by αGATv2 +according to Proposition B.10. +For the second statement, we can write W as 2 × 2 block matrix and restrict it to cases where only W11 is non-zero. Then +we have +α′ +GATv2(u, v) = aT LeakyReLU +�� W11 +W12 +W21 +W22 +� +· +� u +v +� ++ b +� += aT LeakyReLU (W11u + b) +(11) +which gives us an MLP on the first argument u. By the universality of MLP, we conclude the proof for the second statement. +Definition B.9. Given δ > 0, We call X is δ nonlinearly separable if and only if mini̸=j d(Xi, Xj) > δ. +AS 3’. X is δ nonlinearly separable for some δ > 0. 
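For concreteness, the GATv2-style unnormalised score α′_GATv2 used in Lemma B.8 (and in Proposition B.10 below) can be written out directly from its definition. The NumPy sketch below is purely illustrative; the dimensions, the LeakyReLU slope and the random parameters are assumptions made for this example.

import numpy as np

def leaky_relu(x, slope=0.2):
    return np.where(x > 0, x, slope * x)

def gatv2_score(u, v, W, b, a):
    # Unnormalised GATv2 attention: a^T LeakyReLU(W [u || v] + b).
    return a @ leaky_relu(W @ np.concatenate([u, v]) + b)

rng = np.random.default_rng(1)
d, d_hidden = 4, 8                         # illustrative sizes
u, v = rng.normal(size=d), rng.normal(size=d)
W = rng.normal(size=(d_hidden, 2 * d))     # acts on the concatenated pair [u || v]
b = rng.normal(size=d_hidden)
a = rng.normal(size=d_hidden)
print(gatv2_score(u, v, W, b, a))

Because the map (u, v) -> a^T LeakyReLU(W [u || v] + b) is an MLP acting on the concatenation, its universality on compact sets is what enables the nonlinear selection used in Proposition B.10.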
+Proposition B.10. If X ⊂ Rn×d satisfies that Xi is δ-separated from Xj for any i, j ∈ [n], the following holds. For any +X ∈ X and i ∈ [n], there exist a αGATv2 to select any xi ∈ Xi. This implies that we can arbitrarily approximate the +self-attention layer L after relaxing AS3 to AS3’. +Proof. For any i ∈ [n], as Xi is δ-separated from other Xj, ∀j ̸= i, we can draw a region Ωi ⊂ Rd that contains Xi and +separate Xi from other Xj(j ̸= i), where the distance from Xi from other Xj is at least δ according to the definition of +Definition B.9. Next, we show how to construct a continuous function f whose value in Xi is at least 1 larger than its values +in any other Xj ∀j ̸= i. +We set the values of f in Xi to be 1.5 and values of f in Xj, ∀j ̸= i to be 0. We can then interpolate f in areas outside +of ∪Xi (one way is to set the values of f(x) based on d(x, Xi), which results in a continuous function that satisfies our +requirement. By the universality of αGATv2, we can approximate f to arbitrary precision, and this will let us select any +Xi. +C. On the Limitation of MPNN + VN +Although we showed that in the main paper, MPNN + VN of varying depth/width can approximate the self-attention of +full/linear transformers, this does not imply that there is no difference in practice between MPNN + VN and GT. Our +theoretical analysis mainly focuses on approximating self-attention without considering computational efficiency. In this +section, we mention a few limitations of MPNN + VN compared to GT. +C.1. Representation Gap +The main limitation of deep MPNN + VN approximating full self-attention is that we require a quite strong assumption: +we restrict the variability of node features in order to select one node feature to process each iteration. Such assumption is +relaxed by employing stronger attention in MPNN + VN but is still quite strong. +For the large width case, the main limitation is the computational complexity: even though the self-attention layer requires +O(n2) complexity, to approximate it in wide MPNN + VN framework, the complexity will become O(nd) where d is the +dimension of node features. + +On the Connection Between MPNN and Graph Transformer +We think such limitation shares a similarity with research in universal permutational invariant functions. Both DeepSets +(Zaheer et al., 2017) and Relational Network (Santoro et al., 2017) are universal permutational invariant architecture but +there is still a representation gap between the two (Zweig & Bruna, 2022). Under the restriction to analytic activation +functions, one can construct a symmetric function acting on sets of size n with elements in dimension d, which can be +efficiently approximated by the Relational Network, but provably requires width exponential in n and d for the DeepSets. +We believe a similar representation gap also exists between GT and MPNN + VN and leave the characterization of functions +lying in such gap as the future work. +C.2. On The Difficulty of Approximating Other Linear Transformers +In Section 4, we showed MPNN + VN of O(1) width and depth can approximate the self-attention layer of one type of +linear transformer, Performer. The literature on efficient transformers is vast (Tay et al., 2020) and we do not expect MPNN ++ VN can approximate many other efficient transformers. Here we sketch a few other linear transformers that are hard to +approximate by MPNN + VN of constant depth and width. 
+Linformer (Wang et al., 2020b) projects the n×d dimension keys and values to k×d suing additional projection layers, which +in graph setting is equivalent to graph coarsening. As MPNN + VN still operates on the original graph, it fundamentally +lacks the key component to approximate Linformer. +We consider various types of efficient transformers effectively generalize the virtual node trick. By first switching to a more +expansive model and reducing the computational complexity later on, efficient transformers effectively explore a larger +model design space than MPNN + VN, which always sticks to the linear complexity. +C.3. Difficulty of Representing SAN Type Attention +In SAN (Kreuzer et al., 2021), different attentions are used conditional on whether an edge is presented in the graph or not, +detailed below. One may wonder whether we can approximate such a framework in MPNN + VN. +In our proof of using MPNN + VN to approximate regular GT, we mainly work with Definition 3.4 where we do not use any +gn-gn edges and therefore not leverage the graph topology. It is straightforward to use gn-gn edges and obtain the different +message/update/aggregate functions for gn-gn edges non-gn-gn edges. Although we still achieve the similar goal of SAN to +condition on the edge types, it turns out that we can not arbitrarily approximate SAN. +Without loss of generality, SAN uses two types of attention depending on whether two nodes are connected by the edge. +Specifically, +ˆwk,l +ij = +� +� +� +Q1,k,lhl +i◦K1,k,lhl +j◦E1,k,leij +√dk +if i and j are connected in sparse graph +Q2,k,lhl +i◦K2,k,lhl +j◦E2,k,leij +√dk +otherwise +� +� +� +wk,l +ij = +� +� +� +1 +1+γ · softmax +�� +dk ˆwk,l +ij +� +if i and j are connected in sparse graph +γ +1+γ · softmax +�� +dk ˆwk,l +ij +� +otherwise +� +� +� +(12) +where ◦ denotes element-wise multiplication and Q1,k,l, Q2,k,l, K1,k,l, K2,k,l, E1,k,l, E2,k,l ∈ Rdk×d. γ ∈ R+is a +hyperparameter that tunes the amount of bias towards full-graph attention, allowing flexibility of the model to different +datasets and tasks where the necessity to capture long-range dependencies may vary. +To reduce the notation clutter, we remove the layer index l, and edge features, and also consider only one-attention head +case (remove attention index k). The equation is then simplified to +ˆwij = +� +� +� +Q1hl +i◦K1hl +j +√dk +if i and j are connected in sparse graph +Q2hl +i◦K2hl +j +√dk +otherwise +� +� +� +wij = +� +1 +1+γ · softmax (� +d ˆwij) +if i and j are connected in sparse graph +γ +1+γ · softmax (� +d ˆwij) +otherwise +� +(13) +We will then show that Equation (13) can not be expressed (up to an arbitrary approximation error) in MPNN + VN +framework. To simulate SAN type attention, our MPNN + VN framework will have to first simulate one type of attention +for all edges, as we did in the main paper, and then simulate the second type of attention between gn-gn edges by properly + +On the Connection Between MPNN and Graph Transformer +offset the contribution from the first attention. This turns out to be impossible as we cannot express the difference between +two attention in the new attention mechanism. +D. Experimental Details +D.1. Dataset Description +ogbg-molhiv and ogbg-molpcba (Hu et al., 2020) are molecular property prediction datasets adopted by OGB from +MoleculeNet. These datasets use a common node (atom) and edge (bond) featurization that represent chemophysical +properties. 
The prediction task of ogbg-molhiv is a binary classification of a molecule's fitness to inhibit HIV replication. The
ogbg-molpcba, derived from PubChem BioAssay, aims to predict the results of 128 bioassays in a multi-task binary
classification setting.
ogbg-ppa (Wu et al., 2021) consists of protein-protein association (PPA) networks derived from 1581 species categorized
into 37 taxonomic groups. Nodes represent proteins and edges encode the normalized level of 7 different associations
between two proteins. The task is to classify which of the 37 groups a given PPA network originates from.
ogbg-code2 (Wu et al., 2021) consists of abstract syntax trees (ASTs) derived from the source code of functions written in
Python. The task is to predict the first 5 subtokens of the original function's name.
OGB-LSC PCQM4Mv2 (Hu et al., 2021) is a large-scale molecular dataset that shares the same featurization as the ogbg-mol*
datasets. It consists of 529,434 molecule graphs. The task is to predict the HOMO-LUMO gap, a quantum physical property
originally calculated using Density Functional Theory. True labels for the original "test-dev" and "test-challenge" dataset
splits are kept private by the OGB-LSC challenge organizers. Therefore, for the purpose of this paper, we used the original
validation set as the test set, while we left out a random 150K molecules for our validation set.
D.2. Reproducibility
For the LRGB results in Section 7.1, we reproduce the original results up to very small differences.
Table 7: Reproduction of the original results up to small differences. No VN is used.
Model            # Params.   Peptides-func                            Peptides-struct
                             Test AP (reproduce)   Test AP ↑          Test MAE (reproduce)   Test MAE ↓
GCN              508k        0.5918±0.0065         0.5930±0.0023      0.3468±0.0009          0.3496±0.0013
GINE             476k        0.5595±0.0126         0.5498±0.0079      0.3532±0.0024          0.3547±0.0045
GatedGCN         509k        0.5886±0.0027         0.5864±0.0077      0.3409±0.0011          0.3420±0.0013
GatedGCN+RWSE    506k        0.6083±0.0032         0.6069±0.0035      0.3377±0.0025          0.3357±0.0006
D.3. Additional Experiments
We tested MPNN + VN on the PascalVOC-SP dataset and also observe improvement, shown in Table 8, although the
improvement is not as large as that on the Peptides-func and Peptides-struct datasets. The best MPNN + VN model
is GatedGCN + LapPE, where the performance gap to the best GT model is rather small.
D.4. Predicting Sea Surface Temperature
In this experiment, we consider a specific physical modeling problem: forecasting sea surface temperature (SST), that
is, the water temperature close to the ocean's surface. SST is an essential climate indicator and plays a significant role
in analyzing and monitoring the dynamics of weather, climate, and other biological systems for several applications in
environmental protection, agriculture, and industry. We use the NOAA/NESDIS/NCEI Daily Optimum Interpolation Sea
Surface Temperature (DOISST) version 2.1 proposed by (Huang et al., 2021) as an improvement upon version 2.0 from
(Reynolds et al., 2007). We consider the daily SST data of the Pacific Ocean from 1982 to 2021, in the region of longitudes
from 180.125°E to 269.875°E and latitudes from −14.875°N to 14.875°N. We reduce the resolution of the original data
from 0.25° to 0.5°.
Following the procedure from (de Bezenac et al., 2018), (de Bézenac et al., 2019) and +(Wang et al., 2022), we divide the region into 11 square batches of equal size (see Table 10), each contains exactly 30 + +On the Connection Between MPNN and Graph Transformer +Table 8: Baseline experiments for PascalVOC-SP and COCO-SP with rag-boundary graph on SLIC compactness +30 for the node classification task. The performance metric is macro F1 on the respective splits (Higher is better). All +experiments are run 4 times with 4 different seeds. The MP-GNN models are 8 layers deep, while the transformer-based +models have 4 layers in order to maintain comparable hidden representation size at the fixed parameter budget of 500k. +Bold: Best score. +Model +# Params +PascalVOC-SP +Before VN + Test F1 +After VN + Test F1 ↑ +GCN +496k +0.1268±0.0060 +0.1901±0.0040 +GINE +505k +0.1265±0.0076 +0.1198±0.0073 +GatedGCN +502k +0.2873±0.0219 +0.2874±0.0178 +GatedGCN+LapPE +502k +0.2860±0.0085 +0.3103±0.0068 +Transformer+LapPE +501k +0.2694±0.0098 +- +SAN+LapPE +531k +0.3230±0.0039 +- +SAN+RWSE +468k +0.3216±0.0027 +- +Table 9: Number of training, validation and testing examples for each setting in the task of SST prediction. +History window +Prediction window +Train size +Validation size +Test size +6 weeks +4 weeks +147, 884 +3, 245 +7, 271 +2 weeks +148, 038 +3, 399 +7, 425 +1 week +148, 115 +3, 476 +7, 502 +longitudes and 30 latitudes that can be represented as a grid graph of 900 nodes in which we connect each node to its nearest +8 neighbors. We take time series from 1982 to 2018 as our training set, data in 2019 as our validation set, and data from 2020 +to 2021 as our testing set. In our experiments, we set the history window wh as 6 weeks (i.e. 42 days) and the prediction +window wp as 4 weeks (i.e. 28 days), 2 weeks (i.e. 14 days) or 1 week (i.e. 7 days). For each example, each node of the +graph is associated with an input time series capturing the temperatures at the corresponding (longitude, latitude) for the +last wh days, and the task is to predict the output time series of temperatures for the next wp days. We represent each time +series as a long vector and the learning task is fundamentally a node-level regression task. We make sure that there is no +overlapping among training, validation and testing sets (e.g., the output of a training example will not appear in any input +of another validation example). The number of training, validation, and testing examples are roughly 150K, 3K and 7K, +respectively for each setting (see Table 9). We compare our MPNN + VN model with: +• Multilayer Perceptron (MLP) which treats both the input and output as long vectors and has 512 hidden neurons. +• TF-Net (Wang et al., 2020a) with the setting as in the original paper. +• Linear Transformer (Katharopoulos et al., 2020a) (Wang et al., 2020b)5 with Laplacian positional encoding (LapPE). +We compute the first 16 eigenvectors as positions for LapPE. +Both MPNN and MPNN + VN have 3 layers of message passing with 256 hidden dimensions. We apply an MLP with one +hidden layer of 512 neurons on top of the network to make the final prediction. +We train all our models with 100 epochs with batch size 20, initial learning rate 10−3, and Adam optimizer (Kingma & Ba, +2014). 
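A minimal PyTorch sketch of the model and optimisation settings described above is given below. This is our own illustration, not the authors' code: the exact message, update and aggregation functions, the virtual-node initialisation and the adjacency handling (an identity placeholder stands in for the row-normalised 8-neighbour grid) are simplifying assumptions, and the snippet performs a single step on one example rather than the full 100 epochs with batch size 20. Only the three message-passing layers, the 256 hidden dimensions, the MLP head with one hidden layer of 512 neurons, the Adam optimiser and the 10^-3 learning rate are taken from the text.

import torch
import torch.nn as nn

class MPNNVNLayer(nn.Module):
    # One simple message-passing layer with a virtual node (illustrative updates).
    def __init__(self, dim):
        super().__init__()
        self.node_mlp = nn.Linear(3 * dim, dim)   # [self, neighbour mean, virtual node]
        self.vn_mlp = nn.Linear(2 * dim, dim)     # [virtual node, graph mean]

    def forward(self, h, adj, vn):
        # h: (num_nodes, dim), adj: (num_nodes, num_nodes), vn: (dim,)
        neigh = adj @ h
        h = torch.relu(self.node_mlp(torch.cat([h, neigh, vn.expand_as(h)], dim=-1)))
        vn = torch.relu(self.vn_mlp(torch.cat([vn, h.mean(dim=0)], dim=-1)))
        return h, vn

class SSTModel(nn.Module):
    def __init__(self, history=42, horizon=28, dim=256, num_layers=3):
        super().__init__()
        self.encoder = nn.Linear(history, dim)
        self.layers = nn.ModuleList([MPNNVNLayer(dim) for _ in range(num_layers)])
        self.head = nn.Sequential(nn.Linear(dim, 512), nn.ReLU(), nn.Linear(512, horizon))

    def forward(self, x, adj):
        h = self.encoder(x)              # x: (900, history) -> (900, dim)
        vn = h.mean(dim=0)               # initialise the virtual node as the graph mean
        for layer in self.layers:
            h, vn = layer(h, adj, vn)
        return self.head(h)              # node-level regression: (900, horizon)

model = SSTModel()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
adj = torch.eye(900)                     # placeholder for the 8-neighbour grid adjacency
x, y = torch.randn(900, 42), torch.randn(900, 28)
loss = nn.functional.mse_loss(model(x, adj), y)
loss.backward()
opt.step()
print(float(loss))

In the experiments each of the 11 regional graphs uses the true 8-neighbour grid adjacency rather than the identity placeholder above.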
+5The +Linear +Transformer +implementation +is +publicly +available +at +https://github.com/lucidrains/ +linear-attention-transformer + +On the Connection Between MPNN and Graph Transformer +Table 10: These are 11 regions of the Pacific in our experiment. +Index +Longitudes +Latitues +1 +[180.125◦E, 194.875◦E] +[-14.875◦N, -0.125◦N] +2 +[195.125◦E, 209.875◦E] +[-14.875◦N, -0.125◦N] +3 +[210.125◦E, 224.875◦E] +[-14.875◦N, -0.125◦N] +4 +[225.125◦E, 239.875◦E] +[-14.875◦N, -0.125◦N] +5 +[240.125◦E, 254.875◦E] +[-14.875◦N, -0.125◦N] +6 +[255.125◦E, 269.875◦E] +[-14.875◦N, -0.125◦N] +7 +[180.125◦E, 194.875◦E] +[0.125◦N, 14.875◦N] +8 +[195.125◦E, 209.875◦E] +[0.125◦N, 14.875◦N] +9 +[210.125◦E, 224.875◦E] +[0.125◦N, 14.875◦N] +10 +[225.125◦E, 239.875◦E] +[0.125◦N, 14.875◦N] +11 +[240.125◦E, 254.875◦E] +[0.125◦N, 14.875◦N] + diff --git a/5NFKT4oBgHgl3EQf-C45/content/tmp_files/load_file.txt b/5NFKT4oBgHgl3EQf-C45/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..d32b7e7c9a293bad1499a3b32643ed496b6a294f --- /dev/null +++ b/5NFKT4oBgHgl3EQf-C45/content/tmp_files/load_file.txt @@ -0,0 +1,1653 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf,len=1652 +page_content='On the Connection Between MPNN and Graph Transformer Chen Cai 1 Truong Son Hy 1 Rose Yu 1 Yusu Wang 1 Abstract Graph Transformer (GT) recently has emerged as a new paradigm of graph learning algorithms, outperforming the previously popular Message Passing Neural Network (MPNN) on multiple benchmarks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' Previous work (Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=', 2022) shows that with proper position embedding, GT can approximate MPNN arbitrarily well, implying that GT is at least as powerful as MPNN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' In this paper, we study the inverse connection and show that MPNN with virtual node (VN), a commonly used heuristic with little theoretical understand- ing, is powerful enough to arbitrarily approximate the self-attention layer of GT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' In particular, we first show that if we consider one type of linear transformer, the so-called Per- former/Linear Transformer (Choromanski et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=', 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' Katharopoulos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=', 2020b), then MPNN + VN with only O(1) depth and O(1) width can approximate a self-attention layer in Per- former/Linear Transformer.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' Next, via a connec- tion between MPNN + VN and DeepSets, we prove the MPNN + VN with O(nd) width and O(1) depth can approximate the self-attention layer arbitrarily well, where d is the input fea- ture dimension.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' Lastly, under some assumptions, we provide an explicit construction of MPNN + VN with O(1) width and O(n) depth approxi- mating the self-attention layer in GT arbitrarily well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' On the empirical side, we demonstrate that 1) MPNN + VN is a surprisingly strong baseline, outperforming GT on the recently proposed Long Range Graph Benchmark (LRGB) dataset, 2) our MPNN + VN improves over early implementation on a wide range of OGB datasets and 3) MPNN + VN outperforms Linear Transformer and MPNN on the climate modeling task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' 1University of California San Diego, San Diego, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5NFKT4oBgHgl3EQf-C45/content/2301.11956v1.pdf'} +page_content=' Corre- spondence to: Chen Cai t0, we consider the set of publications by the inspirees +who cite the superstar. For each partitioned group, we calculate the average novelty of all +of the publications in year t per partition. Denoting the set of papers inspired by superstar +9 + +0 +10 +20 +30 +40 +50 +Years After First Superstar Publication +3.1 +3.2 +3.3 +3.4 +I(S) +(A) +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +0 +10 +20 +30 +40 +50 +0.0 +0.4 +0.8 +1.2 +1.6 +I(I) +(B) +0 +5 +10 +15 +20 +Years after Inspired-Paper Pub. +0.0 +0.4 +0.8 +1.2 +1.6 +Citations per Paper +(C) +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +3.254 +3.258 +3.262 +3.266 +I(S) +(D) +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +Inspiration Groups +0.20 +0.24 +0.28 +I(I) +(E) +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +0.35 +0.40 +0.45 +0.50 +0.55 +0.60 +Citations per Paper +(F) +FIG. 2. Novelty and Innovation statistics at the group-level Temporal trajectory of average +paper-level statistics. A: Shannon Entropy, B: Innovation, C: Citations per-paper. Aggregated +group-level statistics D: Shannon Entropy, E: Innovation, F: Citations per-paper. Curves indicate +averages, shaded area 95% confidence interval. +s for partition G at year t as P(G, s, t), the average novelty scores are computed as +⟨I(l) +u ⟩G,s,t = +1 +|P(G, s, t)| +� +u∈P(G,s,t) +I(l) +u +(5) +where l = S, X, I is the novelty or innovation score of paper u. +We plot the results of our analysis in Fig. 2. In terms of the temporal evolution of the +Shannon entropy, while there is a monotonic increase—reflecting an increase in the body +of knowledge with time (Fig. S8)—we find little-to-no differences across the groups as seen +in Fig. 2A. Averaging over the entire temporal range also indicates a flat trend (Fig. 2D). +Similar trends are seen for the reference diversity both in terms of its temporal evolution +(upper panel of Fig. S9A,B) as well as their temporally averaged values (lower panel). 
Unlike +the entropy or reference diversity, there is a decreasing trend in time for the citation diversity. +We observe a 5% decrease in the measure between those in the top 10% as compared to the +bottom 50%. Figure 2B,E indicates the same trend for Innovation which also decreases in +time across all groups, reflecting a saturation in the number of combinations of new terms +10 + +0.2 +0.4 +0.6 +0.8 +0.07 +0.097 +0.123 +0.15 +0.177 +I(R ) +(A) +All Papers +Excluding Superstar Papers +0.2 +0.4 +0.6 +0.8 +0.07 +0.082 +0.093 +0.105 +I(C ) +(B) +0.2 +0.4 +0.6 +0.8 +3.05 +3.167 +3.283 +3.4 +I(S) +(C) +0.2 +0.4 +0.6 +0.8 +0.0 +1.333 +2.667 +4.0 +I(I) +(D) +0.2 +0.4 +0.6 +0.8 +P ercent of Author P apers that Cite a Superstar +12.0 +16.0 +20.0 +24.0 +Avg. Author Citation Count +(E) +0.2 +0.4 +0.6 +0.8 +8.0 +16.0 +24.0 +32.0 +Avg. Author Publication Count +(F) +FIG. 3. Novelty and Innovation statistics at the individual author-level. A Reference +Diversity, B Citation Diversity, C Shannon Entropy, D Innovation, E Average citation count, F +Average publication count. +that are combined by authors as their career progresses. The difference between the top and +bottom groups is now around 15%. Finally, citations to papers experience an initial boost +and then decreases in time as seen in Fig. 2C, with now much clearer differences between +the groups. Indeed, there is a 40% difference in citations per-paper between the most and +least inspired groups as seen in Fig. 2F. +In terms of redundancy, in Fig. S9C we plot the cosine similarity (Eq. (4). As the figure +indicates, across all groups there is a decreasing trend in the temporal evolution of the +similarity, yet a clear difference exists, whereby papers published by the top 10% are on +average 8% more similar to each other in terms of content when compared to the bottom +50%. Taken together, the results indicate that groups of authors who cite superstar papers +often do get a citation boost as compared to other sets of authors. However, their output is +modestly more innovative and equally novel as compared to the rest of the corpus. Rather +their content is more redundnant than the remaining sets of authors. +Next, we dis-aggregate the group-level results and examine the degree of superstar in- +fluence at the individual author level. In Fig. 3 we plot the averages of the novelty and +innovation metrics as well as citations and publication counts across authors as a function +of the fraction of their papers that cite superstars. Given that many authors co-publish +11 + +with superstars, the blue curve indicates the results when including such papers, while the +orange curve shows the results excluding these papers. Figure 3A-C indicate that as au- +thors cite more superstars they experience an increase in reference and citation diversity as +well as the Shannon entropy irrespective of whether one includes their collaboration with +superstars. While we see no indications of novelty of content being driven by superstar- +influence at the group-level, at the individual level the benefits are clear. On the other hand, +when looking at Innovation (Fig. 3D), the trend is either flat when including all papers, +and decreasing when co-authored publications are excluded. Indeed, it appears that the +more authors cite superstars, the less innovative their own publications become (i.e those +not co-authored with a superstar). The benefit of collaborating with a superstar becomes +even more apparent when looking at citations (Fig. 
3E) and number of publications (Fig. 3 +F). For the former when including collaborations there is a dramatic benefit in terms of +garnered citations (approximately 67% more citations on average) that drops considerably +when excluding collaborations. Indeed, the citation-benefit appears to be driven primarily +by being collaborators of superstars who by definition have the largest number of citations to +their papers. The same appears to be the case for the latter, with the number of publications +increasing when including collaborations, and decreasing when excluded. +E. +Early Collaborators and Early Innovators +The results thus far provide evidence for academics inspired by superstars producing out- +put with diverse content and that receives visibility via citations, while not necessarily being +innovative in the sense of tying together new concepts. On the other hand, there is also ev- +idence that these features are significantly boosted by direct collaboration with superstars, +and when left to their own devices their publication output, novelty and innovation is lower +than the rest of the corpus. Indeed, it begs the question whether superstars foster indepen- +dent individual success, or rather inhibits it? For instance, as shown, at the aggregate level, +the group of authors that cite superstars the most often tend to publish on mostly the same +topics. +To further probe this we restrict our analysis to early-career scientists. Given that findings +from prior studies have shown that collaboration with successful scientists provides a boost +12 + +0 +5 +10 +15 +20 +25 +0 +2 +4 +6 +8 +10 +Citations Per Publication +(A) +Including Superstar Papers +Collaborator +Early Innovators +0 +5 +10 +15 +20 +25 +0 +1 +2 +3 +4 +5 +6 +(B) +Excluding Superstar Papers +0 +5 +10 +15 +20 +25 +t − t0 (yr) +0.00 +0.25 +0.50 +0.75 +1.00 +1.25 +1.50 +Innovation +(C) +0 +5 +10 +15 +20 +25 +0.00 +0.25 +0.50 +0.75 +1.00 +1.25 +1.50 +(D) +Citation and Novelty Statistics per Academic Group +FIG. 4. Citations and Innovation for frequent collaborators and early innovators A +Citations per paper when including superstar papers, B The same when excluding superstar papers. +C Temporal evolution of Innovation. D The same when excluding superstar papers. The horizontal +axis t − t0 indicates the time elapsed from the t0 the time of first publication for authors in either +group. +for early career researchers [16], and that early success generates a cumulative advantage of +long-term career success [14], we define early collaborators as those authors who collaborate +with superstars in at least half of their papers in the first five years of their career. As a +point of comparison, we define another set of authors who do not collaborate with, or cite +superstar papers, but are in the top 10% of the corpus in terms of Innovation as measured +by their first five years of publications. We term these authors early innovators. We use +innovation as a metric, given that this is the measure by which superstars outperform other +academics the most (Fig. 1D) and therefore might serve as a robust indicator of academic +potential. +13 + +For academics in each group we track the temporal evolution of the citations per-paper, +the number of publications, as well as the Innovation, measured from the date of first pub- +lication t0 for authors in either group. Early collaborators get more citations per paper +(Fig. 4A) and publish more than early innovators (Fig. S10A) particularly within the first +ten years of their career. 
However, when one removes superstar publications, the trend reverses: early innovators now publish more (Fig. S10B) and garner citations at a rate comparable to the other group (Fig. 4B). Additionally, the early innovators maintain a higher degree of Innovation throughout their careers as compared to early collaborators (Fig. 4C, D), with or without including collaborations with superstars. Thus the evidence suggests that while early career scientists indeed get a boost from collaborating with superstars, their own academic output is less innovative and equally visible in terms of citations, as compared to other early career scientists who produce innovative output without the benefit of such collaborations.
III. CONCLUSION AND DISCUSSION
In the exponentially growing knowledge base of academia, in which visibility and funding are increasingly biased towards top academics and institutions, we examine the influence that superstar academics have on the community as a whole, both in terms of novelty and of career success. Superstars provide an irreplaceable source of novel ideas and contributions at rates that exceed those of other academics in the corpus; our metrics support that their accolades are well deserved and should be rewarded as such. We find superstars are highly novel and inspire a higher diversity of concepts among their followers and collaborators. However, they do inhibit innovation potential. Those academics most inspired by a superstar are individually more diverse in their papers, but at the group level add little more intrinsic novelty than groups more weakly inspired by the superstar, even though they achieve higher citations.
Additionally, we find indications of a strong Matthew Effect, whereby academics who cite a superstar heavily receive higher citations when collaborating with the superstar than without, despite higher gains in concept diversity than their academic counterparts. Though collaboration with successful academics can stimulate a successful career path, we find these collaborations can stifle innovation and may not provide the best indicator of long-term independent career success.
Collaboration is a requirement for tackling increasingly difficult interdisciplinary problems. Superstars are well positioned to foster interdisciplinary research efforts by supporting early-career researchers. Although the latter receive a citation boost when collaborating with a superstar, this does not imply that they are developing more novel work than their colleagues who are less connected to top academics. In fact, our results indicate that those closest to a superstar show the lowest innovation potential. This is slightly surprising given that the literature has shown that junior researchers who collaborate with superstars are more likely to publish in high-quality journals and have increased chances of engaging in high-quality research with other top scientists. On balance, however, we find that this does not stimulate long-term independent career success. This could be an indication of individuals getting lost in the wake of a superstar, meaning these researchers “bandwagon” off the ideas and visibility of their respective superstars and iterate on the superstar’s work. Although there is value in iterating upon already developed research questions, this may not foster innovative work and stimulate individual careers.
Indeed, very recently it has been shown that there is a decline in disruptive ideas in both scientific publications and patents [50]. The authors attribute this to an ever-increasing reliance on a narrower set of extant scientific knowledge on which to build ideas, a finding very much in line with our observation that followers of superstars produce redundant and less innovative content as a group.
The observed effects could be a consequence of superstars’ strong hold over their respective fields. It has been shown that paradigm shifts in thinking occur after the sudden deaths of superstars. Collaborators of superstars suffer a drop in publication rate after their superstar’s death, and the field may experience a surge of contributions by outsiders who are disproportionately likely to be highly cited [51]. One can infer that collaborators of superstars are successful because they are collaborating with superstars. Care should therefore be taken when considering these protégés themselves for matters of funding and academic hiring. If the goal is to foster highly novel work, elements outside of prestige and social connection, such as efficacy, equity, and innovation, should be considered.
Our findings are not limited solely to early innovators, collaborators, and inspirees. Though we provide early innovators as an example, many other groups [52] can be isolated and studied in the way we have done here to identify promising academics based on early signatures of novelty or a range of social parameters. We outlined multiple different definitions of novelty in the introduction which we have not further developed in this study. Implementing the different definitions and distinguishing different types of novelty can elucidate what types of novelty are stifled or enhanced by different social configurations.
A subject that we have not probed, but which is directly relevant to our discussion, is the matter of funding. In recent times, funding has become increasingly biased towards top institutions [53], with 90% of NSF funding in 2018 going to 22% of funded institutions, serving 43% of all institutions and 34% of underrepresented minorities [54]. This is coupled with a history of funding disparities with respect to race and underrepresented communities [55–57]. Additionally, underrepresented groups produce novel works at higher rates, yet their novel contributions are taken up by other scholars at lower rates than those of gender and racial majorities [46]. Equitable funding programs have been shown to enhance research infrastructure, investigator capabilities, and intra- and inter-university collaborations at less prominent institutions [58]. As we have shown, those who are least influenced by superstars innovate the most and consequently have higher citation rates. Coupling these results with added attention to equitable funding practices [59] will, we believe, reduce the growing inequality in academia and stimulate novel and innovative research.
Finally, we note that our investigation necessarily comes with limitations. Given our sole focus on the APS body of literature, one should be careful in extrapolating these findings to other academic disciplines. Our corpus is also an incomplete subset of the APS journals, so a full corpus with the entire citation network would give a more accurate picture.
[1] Cliff, H. How to Make an Apple Pie from Scratch: In Search of the Recipe for Our Universe (Picador, London, 2021).
[2] McAndrew, S. & Everett, M.
+Music as collective invention: +A social network analy- +sis of composers. +Cultural Sociology 9, 56–80 (2014). +URL https://doi.org/10.1177/ +1749975514542486. +[3] Muller, E. & Peres, R. +The effect of social networks structure on innovation perfor- +mance: A review and directions for research. +International Journal of Research in Mar- +16 + +keting 36, 3–19 (2019). +URL https://www.sciencedirect.com/science/article/pii/ +S0167811618300284. +[4] Hazarie, S., Barbosa, H., Frank, A., Menezes, R. & Ghoshal, G. Uncovering the differences +and similarities between physical and virtual mobility. Journal of The Royal Society Interface +17, 20200250 (2020). URL https://doi.org/10.1098/rsif.2020.0250. +[5] Chen, Z. et al. +Contrasting social and non-social sources of predictability in human +mobility. +Nature Communications 13, 1922 (2022). +URL https://doi.org/10.1038/ +s41467-022-29592-y. +[6] Nathaniel Rodriguez, Y.-Y. A., Johan Bollen. Collective dynamics of belief evolution under +cognitive coherence and social conformity. PLoS ONE 11, e0165910 (2016). +[7] Holme, P. & Ghoshal, G. Dynamics of networking agents competing for high centrality and +low degree. Physical Review Letters 96, 098701– (2006). URL https://link.aps.org/doi/ +10.1103/PhysRevLett.96.098701. +[8] Ghoshal, G. & Newman, M. E. J. Growing distributed networks with arbitrary degree distri- +butions. The European Physical Journal B 58, 175–184 (2007). URL https://doi.org/10. +1140/epjb/e2007-00208-2. +[9] Recuero, R., Zago, G. & Soares, F. Using social network analysis and social capital to iden- +tify user roles on polarized political conversations on twitter. +Social Media + Society 5, +2056305119848745 (2019). URL https://doi.org/10.1177/2056305119848745. +[10] Dubois, E. & Gaffney, D. The multiple facets of influence: Identifying political influentials +and opinion leaders on twitter. American Behavioral Scientist 58, 1260–1277 (2014). URL +https://doi.org/10.1177/0002764214527088. +[11] Radicchi, F., Weissman, A. & Bollen, J. Quantifying perceived impact of scientific publica- +tions. Journal of Informetrics 11, 704–712 (2017). URL https://www.sciencedirect.com/ +science/article/pii/S1751157717300846. +[12] Hirsch, J. E. An index to quantify an individual’s scientific research output. Proceedings of +the National Academy of Sciences 102, 16569–16572 (2005). +[13] Nielsen, M. W. & Andersen, J. P. Global citation inequality is on the rise. Proceedings of the +National Academy of Sciences 118, e2012208118 (2021). +[14] Merton, R. K. The matthew effect in science. Science 159, 56–63 (1968). +17 + +[15] Runco, M. & Pritzker, S. Encyclopedia of Creativity. Encyclopedia of Creativity (Elsevier +Science, 2011). +[16] Li, W., Aste, T., Caccioli, F. & Livan, G. Early coauthorship with top scientists predicts +success in academic careers. Nature Communications 10, 5170 (2019). +[17] Sekara, V. et al. The chaperone effect in scientific publishing. Proceedings of the National +Academy of Sciences 115, 12603–12607 (2018). +[18] Xie, Q., Zhang, X., Kim, G. & Song, M. Exploring the influence of coauthorship with top +scientists on researchers’ affiliation, research topic, productivity, and impact. Journal of Infor- +metrics 16, 101314 (2022). URL https://www.sciencedirect.com/science/article/pii/ +S1751157722000669. +[19] Abrahamson, E. & Rosenkopf, L. Social network effects on the extent of innovation diffusion: +A computer simulation. Organization Science 8, 289–309 (1997). URL http://www.jstor. +org/stable/2635149. 
+[20] Azoulay, P., Graff Zivin, J. S. & Wang, J. Superstar Extinction. The Quarterly Journal of +Economics 125, 549–589 (2010). URL https://doi.org/10.1162/qjec.2010.125.2.549. +https://academic.oup.com/qje/article-pdf/125/2/549/5319678/125-2-549.pdf. +[21] Clauset, A., Arbesman, S. & Larremore, D. B. Systematic inequality and hierarchy in faculty +hiring networks. Science Advances 1, e1400005 (2015). URL https://doi.org/10.1126/ +sciadv.1400005. +[22] Janosov, M., Battiston, F. & Sinatra, R. Success and luck in creative careers. EPJ Data +Science 9, 9 (2020). URL https://doi.org/10.1140/epjds/s13688-020-00227-w. +[23] Bol, T., de Vaan, M. & van de Rijt, A. The matthew effect in science funding. Proceedings of +the National Academy of Sciences 115, 4887–4890 (2018). URL https://www.pnas.org/doi/ +abs/10.1073/pnas.1719557115. https://www.pnas.org/doi/pdf/10.1073/pnas.1719557115. +[24] Petersen, A. M., Jung, W.-S., Yang, J.-S. & Stanley, H. E. Quantitative and empirical demon- +stration of the matthew effect in a study of career longevity. +Proceedings of the National +Academy of Sciences 108, 18–23 (2011). URL https://www.pnas.org/doi/abs/10.1073/ +pnas.1016733108. https://www.pnas.org/doi/pdf/10.1073/pnas.1016733108. +[25] Lazer, D. & Friedman, A. The network structure of exploration and exploitation. Adminis- +trative Science Quarterly 52, 667 – 694 (2007). +18 + +[26] Rodan, S. & Galunic, C. More than network structure: How knowledge heterogeneity influ- +ences managerial performance and innovativeness. Strategic Management Journal 25, 541–562 +(2004). URL http://www.jstor.org/stable/20142143. +[27] Chang, M. & Joseph E. Harrington, J. Discovery and diffusion of knowledge in an endogenous +social network. American Journal of Sociology 110, 937–976 (2005). URL http://www.jstor. +org/stable/10.1086/426555. +[28] Trapido, D. How novelty in knowledge earns recognition: The role of consistent identities. +Research Policy 44, 1488–1500 (2015). +URL https://www.sciencedirect.com/science/ +article/pii/S0048733315000839. +[29] Xu, F. & Evans, J. Flat teams drive scientific innovation. Proceedings of the National Academy +of Sciences 119 (2022). +[30] Hirsch, J. E. Does the h-index have predictive power? Proceedings of the National Academy +of Sciences 104, 19193–19198 (2007). +[31] Hirsch, J. E. An index to quantify an individual’s scientific research output. Proceedings of the +National Academy of Sciences 102, 16569–16572 (2005). URL https://www.pnas.org/doi/ +abs/10.1073/pnas.0507655102. https://www.pnas.org/doi/pdf/10.1073/pnas.0507655102. +[32] American Physical Society. https://journals.aps.org/datasets. +[33] Richardson, +L. +https://sethc23.github.io/wiki/Python/Beautiful_Soup_ +Documentation.pdf. +[34] Caron, E. & van Eck, N.-J. Large scale author name disambiguation using rule-based scor- +ing and clustering. In Noyons, E. (ed.) Proceedings of the Science and Technology Indicators +Conference 2014, 79–86 (Universiteit Leiden, 2014). URL http://sti2014.cwts.nl. Interna- +tional conference on science and technology indicators, STI 2014 ; Conference date: 03-09-2014 +Through 05-09-2014. +[35] El-Kishky, A., Song, Y., Wang, C., Voss, C. R. & Han, J. Scalable topical phrase mining from +text corpora. Proc. VLDB Endow. 8, 305–316 (2014). URL https://doi.org/10.14778/ +2735508.2735519. +[36] Lee, S. Y. Gibbs sampler and coordinate ascent variational inference: A set-theoretical review. +Communications in Statistics - Theory and Methods 51, 1549–1568 (2022). 
+URL https: +//doi.org/10.1080/03610926.2021.1921214. +19 + +[37] Mimno, D., Wallach, H., Talley, E., Leenders, M. & McCallum, A. +Optimizing semantic +coherence in topic models. In Proceedings of the 2011 Conference on Empirical Methods in +Natural Language Processing, 262–272 (Association for Computational Linguistics, Edinburgh, +Scotland, UK., 2011). URL https://aclanthology.org/D11-1024. +[38] Ouafae, B., Oumaima, L., Mariam, R. & Abdelouahid, L. Novelty detection review state of +art and discussion of new innovations in the main application domains. In 2020 1st Inter- +national Conference on Innovative Research in Applied Science, Engineering and Technology +(IRASET), 1–7 (2020). +[39] Soboroff, I. & Harman, D. Overview of the TREC 2003 novelty track. In Voorhees, E. M. +& Buckland, L. P. (eds.) Proceedings of The Twelfth Text REtrieval Conference, TREC +2003, Gaithersburg, Maryland, USA, November 18-21, 2003, vol. 500-255 of NIST Special +Publication, 38–53 (National Institute of Standards and Technology (NIST), 2003). +URL +http://trec.nist.gov/pubs/trec12/papers/NOVELTY.OVERVIEW.pdf. +[40] Ghosal, T., Saikh, T., Biswas, T., Ekbal, A. & Bhattacharyya, P. +Novelty Detection: +A Perspective from Natural Language Processing. +Computational Linguistics 48, 77–117 +(2022). URL https://doi.org/10.1162/coli_a_00429. https://direct.mit.edu/coli/article- +pdf/48/1/77/2006641/coli a 00429.pdf. +[41] Uzzi, B., Mukherjee, S., Stringer, M. & Jones, B. Atypical combinations and scientific impact. +Science 342, 468–472 (2013). URL https://www.science.org/doi/abs/10.1126/science. +1240474. https://www.science.org/doi/pdf/10.1126/science.1240474. +[42] Schumpeter, J. A. The theory of economic development: An inquiry into profits, capital, credit, +interest, and the business cycle (Theorie der wirtschaftlichen Entwicklung) (Transaction, Edi- +son, NJ, 1934). Translated by Redvers Opie. +[43] Cover, T. & Thomas, J. A. Elements of Information Theory. Wiley Series in Telecommunica- +tions and Signal Processing (Wiley-Interscience, New York, New York, USA, 2006). +[44] Aral, S. & Dhillon, P. +What (exactly) is novelty in networks? +unpacking the vision ad- +vantages of brokers, bridges, and weak ties. Institute for Operations Research and the Man- +agement Sciences (INFORMS) (2021). URL http://dx.doi.org/10.2139/ssrn.2388254. +https://ssrn.com/abstract=2388254. +[45] Kuhn, T. S. The Structure of Scientific Revolutions (University of Chicago Press, Chicago, +20 + +1962). +[46] Hofstra, B. et al. The diversityx2013;innovation paradox in science. Proceedings of the National +Academy of Sciences 117, 9284–9291 (2020). +URL https://www.pnas.org/doi/abs/10. +1073/pnas.1915378117. https://www.pnas.org/doi/pdf/10.1073/pnas.1915378117. +[47] Baten, R. A. et al. Creativity in temporal social networks: how divergent thinking is impacted +by one’s choice of peers. Journal of The Royal Society Interface 17, 20200667 (2020). +[48] Baten, R. A., Aslin, R. N., Ghoshal, G. & Hoque, E. Cues to gender and racial identity +reduce creativity in diverse social networks. Scientific Reports 11, 10261 (2021). URL https: +//doi.org/10.1038/s41598-021-89498-5. +[49] Baten, R. A., Aslin, R. N., Ghoshal, G. & Hoque, M. E. +Novel idea generation in social +networks is optimized by exposure to a “goldilocks” level of idea-variability. PNAS Nexus 1, +pgac255 (2022). +[50] Park, M., Leahey, E. & Funk, R. J. Papers and patents are becoming less disruptive over time. +Nature 613, 138–144 (2023). 
URL https://doi.org/10.1038/s41586-022-05543-x. +[51] Azoulay, P., Fons-Rosen, C. & Graff Zivin, J. S. +Does science advance one funeral at a +time? American Economic Review 109, 2889–2920 (2019). URL https://www.aeaweb.org/ +articles?id=10.1257/aer.20161574. +[52] He, B., Ding, Y., Tang, J., Reguramalingam, V. & Bollen, J. Mining diversity subgraph in mul- +tidisciplinary scientific collaboration networks: A meso perspective. Journal of Informetrics +7, 117–128 (2013). +[53] Murray, D. L. et al. Bias in research grant evaluation has dire consequences for small universi- +ties. PLOS ONE 11, 1–19 (2016). URL https://doi.org/10.1371/journal.pone.0155876. +[54] of Government Affairs, O. Building america’s stem workforce: Eliminating barriers and un- +locking advantages. Tech. Rep., American Physical Society, 1 Physics Ellipse, College Park, +MD 20740-3844 (2021). +[55] Woodson, +T. & Boutilier, +S. +Impacts for whom? +Assessing inequalities in NSF- +funded broader impacts using the Inclusion-Immediacy Criterion. +Science and Pub- +lic +Policy +49, +168–178 +(2021). +URL +https://doi.org/10.1093/scipol/scab072. +https://academic.oup.com/spp/article-pdf/49/2/168/43395599/scab072.pdf. +[56] Chen, C. Y. et al. Decades of systemic racial disparities in funding rates at the national science +21 + +foundation (2022). URL osf.io/xb57u. +[57] Ginther, D. et al. Race, ethnicity, and nih research awards. Science (New York, N.Y.) 333, +1015–9 (2011). +[58] Harris, L. A. Established program to stimulate competitive research (epscor): Background +and selected issues. Tech. Rep. R44689, Congressional Research Service, 1 Physics Ellipse, +College Park, MD 20740-3844 (2017). +[59] Bollen, J., Crandall, D., Junk, D., Ding, Y. & B¨orner, K. +From funding agencies to sci- +entific agency. EMBO reports 15, 131–133 (2014). URL https://doi.org/10.1002/embr. +201338068. +22 + +Supplementary Information +Creativity and Production in Academic Social Networks +Sean Kelty, Raiyan Abdul Baten, Adiba Proma, Ehsan Hoque, Johann Bollen, Gourab +Ghoshal +CONTENTS +S1. Data +S-2 +S1.1. Summary statistics +S-2 +S1.2. Author Disambiguation +S-4 +S1.3. Coherence of Topic Model +S-5 +S1.4. Example of Topic Representation +S-6 +S1.5. Innovation Example +S-8 +S2. Trends of Novelty and Innovation +S-10 +S3. Author and Paper-Level Novelty and Output +S-11 +S-1 +arXiv:2301.02396v1 [cs.DL] 6 Jan 2023 + +S1. +DATA +S1.1. +Summary statistics +TABLE S1: Summary of citation statistics of corpus with +and without abstracts. +No. Dois +678,916 +No. Dois (w/ abstracts) +250,628 +No. Authors (after disambiguation) +307,894 +No. Superstars +303 +h-index cutoff for superstars +21 +Avg. h-index +1.74 +Avg. No. References per paper +13.5 +Avg. No. References per paper (w/ abstracts) +5.57 +Avg. No. Citations per paper +14.4 +Avg. No. Citations per paper (w/ abstracts) +6.88 +S-2 + +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +0.30 +0.35 +0.40 +Ratio of Extract Abstracts +Ratio of Extracted Abstracts to Total Papers per Journal +PRA +PRAB +PRAPPLIED +PRB +PRC +PRD +PRE +PRFLUIDS +PRL +PRMATERIALS +PRPER +PRRESEARCH +PRSTAB +PRSTPER +PRX +PRXQUANTUM +Physical Review +RMP +Journal +101 +102 +103 +104 +105 +Count of Total Extracted Abstracts +Paper Statistics per Journal in APS +FIG. S1. Upper panel: Proportion of analyzed papers in across APS journals. Lower panel: Count +of all papers in each journal. 
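Table S1 above lists an h-index cutoff of 21 for the superstar set (the top 0.1% of authors by h-index). For reference, a minimal sketch of the standard h-index computation is shown below; the per-author list of citation counts is an assumed input format, not one of the released corpus files.

def h_index(citation_counts):
    # Largest h such that the author has h papers each cited at least h times.
    counts = sorted(citation_counts, reverse=True)
    h = 0
    for i, c in enumerate(counts, start=1):
        if c >= i:
            h = i
        else:
            break
    return h

# Example: citation counts [10, 8, 5, 4, 3] give h_index(...) == 4.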
+100 +101 +102 +Paper-Level Citations +10−5 +10−4 +10−3 +10−2 +10−1 +Density +Citation Distributions +Papers with Abstracts Only +Full Corpus +100 +101 +h-index +10−6 +10−5 +10−4 +10−3 +10−2 +10−1 +100 +h-index Distribution +Citation and h-index Distributions +FIG. S2. Left: Distribution of citations for all papers (orange curve), for papers with extracted +abstracts (blue curve). Right: Distribution of the h-index across all authors in the corpus. +S-3 + +S1.2. +Author Disambiguation +We use a scoring-based method to disambiguate authors in our dataset. +1. Initials +• Two Initials : 5 +• More than 2 initials : 10 +• Conflicting Initials : -10 +2. First Name +• General Name : 3 +• Non-General Name : 6 +(A name is considered general if it has been seen more than 1000 times) +3. Address/Affiliation +• Country,City : 4 +• Country,City,Organization : 7 +• Country,City,Organization,Department : 10 +4. Shared Co-Authors +• one : 4 +• two : 7 +• more than 2 : 10 +5. Source +• Journal : 6 +6. Self-Citation : 10 +7. Bibliographic Coupling (two works referencing a common third work) +• one : 2 +• two : 4 +• three : 6 +• four : 8 +• More than four : 10 +8. Co-citation (Number of times a third work has cited two works) +S-4 + +• one : 2 +• two : 3 +• three : 4 +• four : 5 +• More than 4 : 6 +S1.3. +Coherence of Topic Model +We apply the UMass coherence measure to determine a stable number of topics for +our topic model. This coherence score measures how similar the top words in a topic +are to each other. We aim for the highest possible coherence value that stabilizes in a +neighborhood of the number of topics k. Fig. S3 shows the coherence stablizing at roughly +k = 25 topics. +0 +20 +40 +60 +80 +100 +k +4.0 +3.8 +3.6 +3.4 +3.2 +3.0 +2.8 +2.6 +Coherence (umass) +Coherence of Topic Model +FIG. S3. Coherence Scores of P-LDA Topic Model +S-5 + +S1.4. +Example of Topic Representation +Words and phrases in the corpus, which will generally be referred to as "terms", are +represented by a distribution over latent topics that is the frequency of topic assignments +of the term over the entire corpus. Topics are characterized by the frequency of terms +associated with the topic. For each topic, all terms are ranked based on their relative topic +frequency of their own distribution of the given topic. For example, if a phrase had a +topic distribution for k = 3 topics of [.1,.2,.7], the phrase is representative of topic 3. Terms +are pre-processed by removing stop words and stemming words such that conjugated +versions of the same word can be represented as the same word. +TABLE S2: Topic Model Summary of most representative +terms per topic. +Topic +Number +Representative Terms +Topic 1 +crystal film, ultrathin, mtj, stm tip, stack, freestand, high resolution angle, franz, stm, force +micrscop +Topic 2 +center cubic fcc, temperature addit, measur x, tc cuprat, temperature down k, temperature k k, tc +k, tc superconduct, tc superconductor, temperature tc k +Topic 3 +spectral line, ωp, raman line, absorpt part, absorpt line, nd3, electroreflect. 
eliashberg, b1g, endor +Topic 4 +axial magnet, spin angular, moment inertia, moment magnet, parallel magnet field, magnet +revers, torqu, interlay exchange, spin texture, moriya +Topic 5 +collim, electron eject, ion yield, ion trap, n4, ion produc, ion plasma, damag, wall carbon, electron +drift +Topic 6 +cauchi, broken time, takahashi, hamilton jacobi, symmetri spontan, tachyon, ward ident, polyakov, +loop quantum cosmolog, coulomb guage +Topic 7 +excitatori, hub, infect, epidem, volatil, exactli solvabl model, network model, synaps, synapt, +integr fire +Topic 8 +nonequilibrium phase transit, first order phase transit, j’, glass order, thouless transit, glass like, +glass former, triangluar lattic, nearest neighbor coupl, nearest neighbor distanc +S-6 + +Topic 9 +magnitude higher, larg part, fourth gener, even though, order qcd, select rule, third, mach zehnder +interferomet, even larger, order raman +Topic 10 +quasilinear, langevin equat, gilbert equat, equate state eo, sand, attractor, classic chaotic, eulerian, +chimera state, euler equat +Topic 11 +advanc ligo, mit bag, catalog, model background, dark sector, dark matter, sight, model dark, sky, +sno +Topic 12 +nest, der waal force, nodal line, helic edg, non fermi, state degeneraci, hove, majorana zero, +majorana bound, sdh +Topic 13 +three dimension 3d, basin attract, fuld ferrel, dimension squar, lz, trap bose, bodi effect, bodi forc, +hard core boson, fermion atom +Topic 14 +highest occupi molecular, muffin tin orbit, gaas1, clathrat, cl2, cl, hexagon boron, interstiti, gell, ci +Topic 15 +puls width, optic parametr, sapphir laser, exciton biexciton, optic pump, harmon gener shg, optic +puls, inxga1 xa, optic nonlinear, ultrastrong +Topic 16 +clauser, horn shimoni holt, simpl analyt express, us deriv, part paper, analyt formula, cb, exact +forumla, exact expression, pauli exclus +Topic 17 +agre reason, foudn good agreement, recent experiment data, find excel agreement, find good +agreement, theoret data, theoret cross, reason agreement experiment, found excel agreement, +good agreement experimental result +Topic 18 +qutrit, regist, processor, studi entagle, protocol, markovian dynam, purif, decoy state, qkd, error +correct +Topic 19 +nucleon nucleon scatter, deep inelast scatter, total angular momentum, inclus cross, transfer cross +section, multifragment, multiperipher, depend cross section, forward angle, πn +Topic 20 +full potenti linear augment plane wave, wave born, wannier function, impuls, infield, use path, +use mont, within densiti function, jastrow, use harte +Topic 21 +avoid walk, nonergod, time τ, time tail, time t2, time t2, dimension diffus, time random, nonex- +ponenti, msd +Topic 22 +even even nuclei, xe136, rich nuclei, gt, v’, p0, cf252, α p, α reaction, p1 +Topic 23 +director field, shear modulu, homeotrop, tλ, antivortex, humid, u0, hydrophil, shear band, shear +strain +S-7 + +Topic 24 +signific role, key role, kibbl zurek, amino acid, play essenti, play domin, play crucial, play critical, +play central, remain elus +Topic 25 +paramet η, ev2, rev c, rev lett, eq, right hand, right left, e e collide, e e annhil, f0 +TABLE S3: Example of Phrase-Topic Distributions. +Term +Topic-Embedding +... 
+Quantiz +[1, 0, 0, 0, 0 2259, 0, 0, 560, 0, 0, 882, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 677, 0, 0] +Quantum +[29, 0, 0, 21, 0, 4304, 1069, 4276, 0, 308, 0, 6008, 454, 46, 14920, 0, 0, 35931, 0, 1828, 0, 0, +1384, 7, 1] +Quark +[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14542] +Quarkonia +[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 125] +Quarkonium +[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 299] +Quarter +[0, 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, 321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] +... +Quantum Wire +[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 342, 0, 0, 292, 0, 0, 23, 0, 0, 0, 0, 91, 0, 0] +Quantum Zeno +[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0] +Quark Antiquark +[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 433] +Quark Condens +[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 107] +Quark Decay +[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25] +... +S1.5. +Innovation Example +The innovation metric counts the first time a term or a new combination of terms have +been seen in an article over the entire corpus. Fig. S4 shows the introduction of the terms +"quantum" and "cosmolog" in the corpus. Note that "cosmolog" is the root of words such +as "cosmology" and "cosmological" that were lemmatized in pre-processing. We plot the +frequency of the terms in time as well as vertical lines representing the first year the term +S-8 + +has been seen. We also plot the counts of the phrase "quantum cosmolog" which is an +additionally considered term in our topic model. +1925 +1950 +1975 +2000 +Year +0 +500 +1000 +1500 +2000 +Yearly Counts +Quantum +Cosmolog +Quantum Cosmolog +1925 +1950 +1975 +2000 +Year +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +Density +FIG. S4. Example of innovation measure with terms "quantum" and "cosmolog" +S-9 + +S2. +TRENDS OF NOVELTY AND INNOVATION +FIG. S5. Correlations between all novelty and innovation measures based on Pearson’s r. +S-10 + +Correlation of Novelty Measures +R^2:0.00137 +R^2:0.000218 +R^2:0.00737 +0.40 +0.40 +0.35 +0.35 +4.0 +3.5 +OE0 +Citation Diversity +3.0 +0.25 +0.25 +Reference +0.20 +0.20 +2.0 + 0.15 +0.15 +0.10 +0.10 +1.0 +0.05 +0.05 +0.5 +0.00 +0.00 +0.0 +10 +12 +12 +14 +Innovativeness +Innovativeness +Innovativeness +R^2:0.062 +R^2:0.0947 +R^2: 0.0529 +0.40 +0.35 +0't +0't +3.5 +3.5 +OE0 +Diversity +0.25 +2.5 +2.5 +0.20 +Citation +2.0 +2.0 +0.15 +15 +0.10 +1.0 +0.05 +0.5 +0.5 +0.00 +0.0 +0.40 +0.10 +0.0 - +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +0.30 +0.35 +0.00 +0.05 +0.15 +0.20 +0.25 +0.30 +0.35 +0.40 +0.00 +0.05 +0.10 +0.15 +0.20 +0.25 +0.30 +0.35 +0.40 +Reference Diversity +Reference Diversity +Citation DiversityS3. +AUTHOR AND PAPER-LEVEL NOVELTY AND OUTPUT +0 +20 +40 +0.095 +0.102 +0.108 +0.115 +Author-Level Novelty +I(R)(A) +All Papers +Without Superstar Papers +0 +20 +40 +Number of Author Publications +0.075 +0.083 +0.092 +0.1 +I(C)(B) +0 +20 +40 +3.16 +3.183 +3.207 +3.23 +3.253 +I(S)(C) +0 +20 +40 +0.0 +3.333 +6.667 +10.0 +I(I) (D) +Author-Level Novelty Scores vs Author Success +FIG. S6. Novelty and innovation metrics of an author’s publication record as a function of +their number of publications. Authors with between 1-50 publications in the corpus have been +considered. 
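The innovation example of Sec. S1.5 above counts the first time a term, or a new combination of terms, appears in the corpus. Below is a minimal sketch of that bookkeeping, assuming papers are supplied as (year, set of pre-processed terms) pairs sorted by year; how these first-occurrence counts are weighted into the Innovation score of the main text is not reproduced here.

from itertools import combinations

def first_occurrence_counts(papers_by_year):
    # papers_by_year: iterable of (year, terms) with terms a set of stemmed terms/phrases,
    # sorted in chronological order (hypothetical input format).
    seen_terms, seen_pairs = set(), set()
    results = []
    for year, terms in papers_by_year:
        new_terms = terms - seen_terms                      # terms never seen before in the corpus
        pairs = {frozenset(p) for p in combinations(sorted(terms), 2)}
        new_pairs = pairs - seen_pairs                      # pairwise combinations never seen before
        results.append((year, len(new_terms), len(new_pairs)))
        seen_terms |= terms
        seen_pairs |= pairs
    return results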
+100 +101 +102 +0.09 +0.103 +0.117 +0.13 +Paper-Level Novelty +I(R)(A) +All Papers +Without SS Papers +101 +102 +Number of Citations +0.02 +0.07 +0.12 +0.17 +I(C)(B) +100 +101 +102 +3.1 +3.15 +3.2 +3.25 +I(S)(C) +100 +101 +102 +0.0 +0.333 +0.667 +1.0 +I(I) (D) +Paper-Level Novelty Scores vs Paper Success +FIG. S7. Novelty and innovation metrics of an author’s publication record as a function of the +number of citations their papers garner. +S-11 + +1970 +1980 +1990 +2000 +2010 +2020 +3.050 +3.075 +3.100 +3.125 +3.150 +3.175 +3.200 +3.225 +3.250 +Entropy +1970 +1980 +1990 +2000 +2010 +2020 +Year +0.080 +0.085 +0.090 +0.095 +0.100 +0.105 +0.110 +0.115 +0.120 +Reference Diversity +1970 +1980 +1990 +2000 +2010 +2020 +0.04 +0.06 +0.08 +0.10 +0.12 +Citaiton Diversity +1970 +1980 +1990 +2000 +2010 +2020 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +Innovation +Average Novelties per Year +FIG. S8. All Novelty Measures per Year. +0 +10 +20 +30 +40 +50 +0.09 +0.10 +0.11 +0.12 +0.13 +0.14 +0.15 +Temporal +Reference Diversity (A) +group +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +0 +10 +20 +30 +40 +50 +Years After First SS Pub. +0.08 +0.09 +0.10 +0.11 +0.12 +0.13 +0.14 +0.15 +Citation Diversity (B) +group +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +0 +10 +20 +30 +40 +50 +0.500 +0.525 +0.550 +0.575 +0.600 +0.625 +0.650 +0.675 +0.700 +Similarities (C) +group +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +0.1290 +0.1295 +0.1300 +0.1305 +0.1310 +Aggregate +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +Inspiration Groups +0.099 +0.100 +0.101 +0.102 +0.103 +0.104 +0.00-0.10 +0.10-0.20 +0.20-0.30 +0.30-0.50 +0.50-1.00 +0.54 +0.55 +0.56 +0.57 +0.58 +Concept Diversity and Similarities of Inspired Groups +FIG. S9. (A) Reference Diversity, (B) Citation Diversity, (C) Within-group paper similarities for +the followers of a superstar partitioned by level of inspiration. Upper panel: temporal evolution. +Lower panel: averaged in time. +S-12 + +0 +5 +10 +15 +20 +25 +t − t0 (yr) +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +1.2 +1.4 +Yearly Publications +Including Superstar Papers +Early Collaborators +Early Innovators +0 +5 +10 +15 +20 +25 +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +Excluding Superstar Papers +Publication Rate per Academic Group +FIG. S10. Publication rates of academic groups, LEFT including superstar collabortions and RIGHT +excluding superstar collaborations +S-13 + diff --git a/6NE0T4oBgHgl3EQfewDQ/content/tmp_files/load_file.txt b/6NE0T4oBgHgl3EQfewDQ/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..ba65fc8017da93c0a455800fb76695ca44552b5e --- /dev/null +++ b/6NE0T4oBgHgl3EQfewDQ/content/tmp_files/load_file.txt @@ -0,0 +1,1647 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf,len=1646 +page_content='Don’t follow the leader: Independent thinkers create scientific innovation Sean Kelty,1 Raiyan Abdul Baten,2 Adiba Mahbub Proma,2 Ehsan Hoque,2 Johan Bollen,3 and Gourab Ghoshal1, 2, ∗ 1Department of Physics & Astronomy, University of Rochester, Rochester, NY 14607, USA 2Department of Computer Science, University of Rochester, Rochester, NY 14607, USA 3Luddy School of Informatics, Computing and Engineering, 919 E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' 10th St.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Bloomington, IN 47408, USA Abstract Academic success is distributed unequally;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' a few top scientists receive the bulk of attention, citations, and resources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' However, do these “superstars” foster leadership in scientific innovation?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' We introduce three information-theoretic measures that quantify novelty, innovation, and impact from scholarly citation networks, and compare the scholarly output of scientists who are either not connected or strongly connected to superstar scientists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' We find that while connected scientists do indeed publish more, garner more citations, and produce more diverse content, this comes at a cost of lower innovation and higher redundancy of ideas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Further, once one removes papers co-authored with superstars, the academic output of these connected scientists diminishes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' In contrast, authors that produce innovative content without the benefit of collaborations with scientific superstars produce papers that connect a greater diversity of concepts, publish more, and have comparable citation rates, once one controls for transferred prestige of superstars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' On balance, our results indicate that academia pays a price by focusing attention and resources on superstars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' ∗ Correspondence email address: gghoshal@pas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='rochester.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='edu 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='02396v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='DL] 6 Jan 2023 I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' INTRODUCTION “To truly make an apple pie from scratch you must first invent the universe”—a quote attributed to Carl Sagan [1]—illustrates the idea that the process by which individuals cre- ate is contingent upon the elements on which that creation is based.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Whether creating a new piece of music, going about daily routines, or engaging in scientific research, people’s actions are founded in the information, experiences, and relationships that they have estab- lish by themselves and through others [2–5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Each person has their own basis of knowledge that stems from their own lived experiences while also existing in a network of relationships through which they share experiences and knowledge with each other, thereby informing a collective understanding among a network of connected individuals [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Within such net- works, hierarchies can emerge in which some actors exert greater social influence over the network and thus the creative process that it supports, while others may influence only those closest to them or no one at all [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' This social hierarchy is common in the societal dynamics of government and politics, where some individuals and institutions exert a great degree of influence over the flow of information in the system and opinion formation [8–10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Academia is not immune from the emergence of social hierarchies;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' some academics can function as figures of authority due to the merit and influence of their work and their promi- nent position in a network of academic collaborations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Citations as an indicator of academic influence [11] have long been known to be distributed very unequally[12], with a minority of a few scientists receiving most citations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Such inequality may be increasing at a global level[13], at least with respect to citation numbers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' In academic publishing, biasing effects like this have been studied under the lens of the Matthew Effect, where success begets more success and early success compounds into a cumulative advantage as the “rich get richer” [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' There are arguments that this effect is beneficial for academia;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' the rewards of top researchers are proportional to their contributions, which ensures the “epistemic security” of the field [15].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' This thinking is aligned with the notion that science should operate as a meritocracy;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' those who contribute the most are also valued the most, and will therefore be most influential.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Indeed, there is a high degree of trust in our most successful academics and the value of their mentorship.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' For instance, junior researchers collaborating with top scien- tists at the early stages of their career are likely to become top-cited scientists themselves, 2 especially those at less prestigious universities [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Inexperienced academics can benefit from apprenticeships with top scientists;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' the “chaperoning” of early-career scientists leads to higher rates of publication in high-impact journals [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' These relationships are frequently mutually beneficial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Less visible authors benefit from more opportunities to publish papers in high quality journals that attract larger audiences, whereas top scientists gain collabo- rators with unique skills to produce more high quality work [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Close collaboration of less visible academics with those in the upper echelons can furthermore create opportunities for a first-mover advantage, inducing a positive feedback loop and early bandwagoning of innovative ideas [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' While top academics (sometimes referred to as “superstars”) may make consistent and high impact contributions that benefit their field and collaborators, their status as super- stars may also have deleterious effects due to the subsequent concentration of resources and attention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' For instance, it has been shown that the collaborators of academic superstars experience a 5 to 9% drop in publication rates after the sudden death of that superstar [20], highlighting their dependence on the superstar’s collaboration.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' In fact, it is unclear whether collaborating with superstars truly fosters independent career development [21, 22] Further- more, superstars can induce a high degree of inequality in the distribution of research funding due to a funding Matthew-effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Those who receive funding accumulate twice as much re- search funding afterwards compared to those who submitted similarly valued proposals but found themselves, by chance, just below the funding threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' There is no evidence that this accumulation of research funding is due to actual achievements enabled by previous funding [23, 24].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' If successful collaborations with superstars lead to early funding success, this can induce a superstar-fueled funding cycle that increasingly widens the gap between scientific haves and have-nots.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The topology, structure, and characteristics of scientific collaboration networks may play an important role in these effects since they shape both the production and dissemination of ideas, potentially with conflicting outcomes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Tightly connected networks could be more efficient in distributing and leveraging knowledge thereby yielding higher productivity, but may at the same time lead to a decline of diversity, reducing exploration and discovery [25– 27].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Although some spillover effects may occur, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' collaborators of highly-acclaimed authors benefit by proxy [28], it is not clear whether the concentration of attention of resources 3 towards superstars yields more novel and innovative research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' This is a particularly relevant issue with the rise of interdisciplinary research which relies on the ability of scientists to collaborate in equitable teams that foster creativity and innovation across various research fields [29].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' To investigate the effects of superstar influence on academic productivity, impact, and innovation, we perform a comprehensive analysis of the American Physical Society corpus.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Following [20], we define superstars as academics who are among the top .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1% in terms of their h-index [30, 31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' We extract the semantic content of over 250,000 abstracts, defining a number of information-theoretic measures to quantify the novelty and innovation of each paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' We augment this with analysis of publication and citation rates, and examine the difference in academic output between researchers who collaborate with or cite frequently papers by superstars against those with little-to-no connection to such superstars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' We find that at the individual level, collaborators and frequent citers of superstars, publish more, garner higher citations and produce papers with more diverse content compared to other academics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' However, their work is no more innovative than the rest of the corpus and its content is more redundant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Further, once one excludes papers co-authored with superstars, their publication and citation output are no different from the rest of the corpus and in some cases output is lower.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Focusing on early career researchers, we find that those who frequently collaborate with superstars in the beginning of their careers, do eventually go on to produce impressive academic output, although once the collaboration is removed, their output in terms of publi- cation rates, citation impact, and innovation is significantly diminished.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' On the other hand, early career researchers that produce innovative content without the benefit of early super- star collaboration, continue to produce such content over the rest of their careers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' They publish more then early collaborators of superstars and accrue similar citation numbers, once one controls for the collaboration itself.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' 4 II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' RESULTS A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Data We use the American Physical Society (APS) corpus [32] that contains articles published in APS journals since 1893.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The data set contains full citation data, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' the citations pointing from the references of one article to another, allowing a reconstruction of the full citation network among all articles, including article-specific fields such as DOI, journal, volume, issue, first page and last page OR article id and number of pages, title, authors, affiliations, publication history, PACS codes, table of contents heading, article type, and copyright information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Given that the data does not include article abstracts, we used a web-scraping algorithm [33] to collect abstracts for 250,628 articles corresponding to between 35-40% of all published papers across the different APS journals (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' We note that around 1% of these articles have references not contained in the APS citation network, and on average we scraped abstracts for 38% of paper references.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The distribution of citations and h-index are both heavy-tailed (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S2), with the average number of citations being 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='4 and the average h-index 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Author disambiguation was done using a rule-based scoring method [34] (Cf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='S1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='2) We consider authors who first publish on or after 1970, and define superstars as those with the top .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1% of h-index in the corpus, corresponding to an h-index threshold of 21.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' This yields 303 superstars among 292,394 authors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The summary statistics can be found in Tab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' In order to extract topics from the collected abstracts, we use an unsupervised Latent Dirichlet Allocation (LDA) algorithm on phrases (P-LDA) [35] to establish vector embed- dings for phrases and documents within our corpus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Stop words in the corpus were removed, all words were lemmatized, and phrases were determined based on a significance score that determined whether or not phrases occurred due to random chance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' These vector embed- dings have dimensionality k correspoding to the number of topics defined for our corpus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' P-LDA utilizes Gibbs Sampling to generate distributions of topics over phrases as well as documents [36], from which novelty scores can be extracted based on topic-spread.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' We choose a number of topics k based on the UMass coherence measure ([37]), the value of which first stabilizes at k = 25 topics (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Tab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S2 shows the top 10 terms per topic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The resulting output for each document u is a k-dimensional vector vu whose elements vu i 5 correspond to the frequency of topic i extracted from its abstract (example in Tab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' B.' 
B. Novelty, innovation and redundancy
Novelty detection in the literature has been implemented in a variety of ways [38], such as contextualizing novelty in machine learning as information retrieval [39, 40], distant combinations of ideas via citation relations [41], first-pass combinations of concepts never before connected [42], knowledge-graphs of concepts within social networks [26], and agent-based simulations of social and individual learning [27]. Here we rely on document-level embeddings that represent a distribution of all topics contained within the abstract of a given paper, using which one can define the topic diversity in terms of a paper, its references, and the articles that cite the paper. Using this, we define a variety of metrics capturing different aspects of novelty and innovation. Coupling connections between authors and the content of their works can then elucidate the influence that superstars have on the success of, and novelty produced by, other academics.

Entropy: For a given document u, we define the Shannon entropy as

I^{(S)}_u = -\sum_{i=1}^{k} v^u_i \ln v^u_i ,    (1)

The expression quantifies the average level of "surprise" or uncertainty over the outcomes of a random variable [43]. In this context, papers focusing on a limited number of topics in their abstracts will yield low values of I^{(S)}_u, whereas those with a wide diversity of topics will yield a larger value of the entropy.
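As a concrete illustration of Eq. (1) (not the authors' code), the entropy can be computed directly from a document's topic-frequency vector; zero entries are skipped since x ln x tends to 0 as x tends to 0.

    import math

    def shannon_entropy(v):
        # Eq. (1): I^(S)_u = -sum_i v_i ln v_i for a topic-frequency vector v.
        return -sum(p * math.log(p) for p in v if p > 0)

    # A paper spread evenly over four topics scores higher than one focused on a single topic.
    print(shannon_entropy([0.25, 0.25, 0.25, 0.25]))  # ~1.386
    print(shannon_entropy([1.0, 0.0, 0.0, 0.0]))      # 0.0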
Reference and Citation Diversity: While I^{(S)}_u measures the "surprise" with respect to a paper's content, in this case its abstract, references and citations refer to the degree to which the ideas in a given paper were inspired by other papers (references) or were an inspiration to other papers (citations). We can thus measure the novelty of a paper, or its Information Diversity [44], by evaluating the dispersion of the topics of its references or of the citations it receives. The greater the variance of the topic distribution, the higher the information diversity. For a set X_u, which can represent either the references in paper u or the citations to paper u, we define the quantity

I^{(X)}_u = \frac{1}{|X_u|} \sum_{l \in X_u} \left[ 1 - \cos\left( v^l, \bar{X}_u \right) \right] ,    (2)

where \cos( v^l, \bar{X}_u ) is the cosine similarity of the vector embedding of a particular reference/citation v^l with the average over the vector embeddings of all references/citations in the set X_u. We can as such define reference diversity and citation diversity as the information diversity over the references from a paper and the citations to the paper, respectively.
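A minimal sketch of Eq. (2), assuming the topic vectors of a paper's references (or of the papers citing it) are available as rows of an array; this is an illustration, not the authors' implementation.

    import numpy as np

    def information_diversity(X):
        # Eq. (2): average of (1 - cosine similarity) between each member of the set
        # and the mean vector of the set. Rows of X are the topic vectors of a paper's
        # references (reference diversity) or of its citing papers (citation diversity).
        X = np.asarray(X, dtype=float)
        mean_vec = X.mean(axis=0)
        cos = X @ mean_vec / (np.linalg.norm(X, axis=1) * np.linalg.norm(mean_vec))
        return float(np.mean(1.0 - cos))

    refs = [[0.7, 0.2, 0.1], [0.1, 0.8, 0.1], [0.2, 0.2, 0.6]]
    print(information_diversity(refs))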
Innovation: The metrics defined thus far are based on topic models expressed as topic distributions per document, derived from the words in their content (abstracts). These metrics capture the topic diversity of the paper itself, or of its influences, but do not express the degree to which the paper expanded the literature through innovation. In other words, they express what the documents themselves are about, but not whether they add to the diversity of the literature. We therefore define Innovation as the degree to which a document adds topics in new combinations to the literature [45, 46]. Specifically, innovation in this context is a measurement of when terms were first introduced or combined in the corpus (cf. Sec. S1.4 and Fig. S4). Coupled with the novelty measures, this allows us to track how the diversity of ideas correlates with new conceptual recombinations and co-occurrences of terms. Following this logic, we define the Innovativeness of paper u as

I^{(I)}_u = \frac{1}{2} \sum_{w_1 \neq w_2 \in u} I(w_1, w_2; u) ,    (3)

where w_1 and w_2 are distinct terms in paper u, I(w_1, w_2; u) is an indicator function that is 1 if terms w_1 and w_2 are first seen within the corpus in paper u and 0 otherwise, and the 1/2 prefactor accounts for double counting. To remove spurious conceptual links due to chance or extreme rarity, we calculate a point-wise mutual information for all links as the log ratio of the co-occurrence probability over the individual probabilities of each concept [46]. In Fig. S5 we determine the Pearson's r correlation coefficients between each pair of measures and find only weak correlations, indicating that each measure captures a different aspect of academic output.
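Eq. (3) amounts to counting, for each paper, the unordered term pairs whose first co-occurrence anywhere in the corpus happens in that paper. A minimal sketch, assuming papers are processed in chronological order and each paper is represented by its set of (already filtered) terms; the PMI screening described above is omitted for brevity, and this is not the original code.

    from itertools import combinations

    def innovation_scores(papers_in_time_order):
        # Eq. (3): for each paper (a set of terms), count the term pairs that
        # co-occur for the first time in the corpus in that paper.
        seen_pairs = set()
        scores = []
        for terms in papers_in_time_order:
            pairs = {frozenset(p) for p in combinations(sorted(terms), 2)}
            scores.append(len(pairs - seen_pairs))
            seen_pairs |= pairs
        return scores

    # Toy corpus: each paper introduces exactly one previously unseen pair.
    papers = [{"spin", "glass"}, {"spin", "network"}, {"spin", "glass", "network"}]
    print(innovation_scores(papers))  # [1, 1, 1]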
Redundancy: Finally, in a related context, in the field of creative ideation it has been reported that inspirees stimulated by highly creative alters tend to generate more creative ideas [47–49]. However, as a group, the inspirees' ideas were found to be similar to each other, leading to redundancy in the generated ideas over time at the group level. To check whether a similar effect manifests in academic publishing, we compute the cosine similarity score between papers u, u' in the set P(G, s, t) (defined below) as

Sim(G, s, t) = \frac{2}{|P(G, s, t)| \, (|P(G, s, t)| - 1)} \sum_{u, u' \in P(G, s, t)} \cos\left( v^u, v^{u'} \right) .    (4)
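Eq. (4) is simply the average pairwise cosine similarity over all distinct pairs of papers in a group, so a sketch under the same assumptions as above (one topic vector per paper, at least two papers per group) is short; again this is illustrative rather than the original code.

    import numpy as np
    from itertools import combinations

    def group_similarity(vectors):
        # Eq. (4): mean cosine similarity over all unordered pairs of papers in a group.
        V = [np.asarray(v, dtype=float) for v in vectors]
        sims = [float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
                for a, b in combinations(V, 2)]
        return sum(sims) / len(sims)

    group_papers = [[0.6, 0.3, 0.1], [0.5, 0.4, 0.1], [0.1, 0.1, 0.8]]
    print(group_similarity(group_papers))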
C. Superstar statistics
We next examine whether the novelty and innovation produced by superstars are significantly different from the rest of the academic corpus. In Fig. 1 we plot the Reference and Citation diversity (Eq. (2)), the Shannon entropy (Eq. (1)) and the Innovation (Eq. (3)), comparing the set of superstar academics against the rest of the authors in the corpus. In terms of reference diversity, citation diversity and Shannon entropy, superstars outperform the remaining academics by 20%, 15%, and 2% respectively. That is, superstars are inspired by a higher diversity of content, publish works that are more conceptually diverse, and inspire a wider array of publications than non-superstars. The starkest contrast can be seen in terms of Innovation, where there is a factor of ten difference between superstars and other academics, indicating that the former are more prolific in introducing new combinations of terms. We note that there is a monotonic dependence of the metrics on the number of publications for all academics, although the effect is more pronounced for superstars (Fig. S6). Furthermore, there is also a monotonic dependence of the citations received by a paper u on the novelty/innovation metrics (once again more pronounced for superstars), indicating that an increase in conceptual diversity and the ability to connect concepts for the first time is rewarded in terms of more attention paid to that paper (Fig. S7).

FIG. 1. Average author-level statistics of novelty and innovation. A: Reference Diversity, B: Citation Diversity, C: Shannon Entropy, D: Innovation. The orange bars are for superstars (h-index ≥ 21) and the blue bars correspond to all other authors in the corpus.

D. Superstar influence
Having established that superstars outperform other academics in terms of our metrics, we next determine to what degree superstars affect the academic output of their collaborators and their "inspirees" (those inspired by their work). Inspirees are authors that cite a superstar's papers, for whom we determine the degree of inspiration by the frequency of citations. We examine inspirees both at the group and individual levels. At the group level, we center the superstar in a network of inspirees where the degree of inspiration is the number of times a researcher cites the superstar. We then partition the inspirees into groups based on their degree of inspiration, where the upper bounds for each bin are the top 10% of inspirees, 20%, 30%, 50%, and 100%.
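The grouping step can be read as follows: for a given superstar, rank the inspirees by how many times they cite that superstar and cut at the stated percentiles. The sketch below is one illustrative reading of this procedure with hypothetical inputs, not the original code.

    def partition_inspirees(citation_counts, bounds=(0.10, 0.20, 0.30, 0.50, 1.00)):
        # Split the inspirees of one superstar into bins by rank of their citation
        # count to that superstar (top 10%, 10-20%, 20-30%, 30-50%, 50-100%).
        ranked = sorted(citation_counts, key=citation_counts.get, reverse=True)
        n = len(ranked)
        groups, start = {}, 0
        for upper in bounds:
            end = round(upper * n)
            groups[upper] = ranked[start:end]
            start = end
        return groups

    counts = {"a": 12, "b": 9, "c": 5, "d": 3, "e": 2,
              "f": 1, "g": 1, "h": 1, "i": 1, "j": 1}
    print(partition_inspirees(counts))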
These groups represent increasingly weakening ties to a given superstar; those in the top 10 percent are the most actively inspired, while the bottom 50 percent typically cite the superstar only once. Note that some inspirees in the bottom 50 group of one superstar may be in the top group of another superstar. The increasing bin sizes are chosen to account for the decreasing frequency of inspired citations among the least-inspired inspirees, such that there is a sufficient number of papers compared between groups. Given that we are interested in the temporal evolution of superstar influence on the novelty and innovation of the inspirees, we denote the year of the first superstar publication as t0 = 0 and, for every subsequent year t > t0, we consider the set of publications by the inspirees who cite the superstar. For each partitioned group, we calculate the average novelty of all of the publications in year t per partition.
Denoting the set of papers inspired by superstar s for partition G at year t as P(G, s, t), the average novelty scores are computed as

\langle I^{(l)}_u \rangle_{G,s,t} = \frac{1}{|P(G, s, t)|} \sum_{u \in P(G, s, t)} I^{(l)}_u ,    (5)

where l = S, X, I is the novelty or innovation score of paper u.
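Eq. (5) is a plain average of the chosen score over the papers a group publishes in a given year. A compact sketch, assuming each paper record carries its inspiration group, its publication year relative to t0, and a precomputed score (all field names here are hypothetical):

    from collections import defaultdict

    def yearly_group_averages(papers, score="entropy"):
        # Eq. (5): average a per-paper score over P(G, s, t), i.e. over the papers
        # published in year t by the inspirees in group G of superstar s.
        sums, counts = defaultdict(float), defaultdict(int)
        for p in papers:
            key = (p["group"], p["year"])
            sums[key] += p[score]
            counts[key] += 1
        return {key: sums[key] / counts[key] for key in sums}

    papers = [
        {"group": "0.00-0.10", "year": 5, "entropy": 3.2},
        {"group": "0.00-0.10", "year": 5, "entropy": 3.4},
        {"group": "0.50-1.00", "year": 5, "entropy": 3.1},
    ]
    print(yearly_group_averages(papers))  # per-(group, year) averages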
FIG. 2. Novelty and Innovation statistics at the group level. Temporal trajectory of average paper-level statistics: A: Shannon Entropy, B: Innovation, C: Citations per paper. Aggregated group-level statistics: D: Shannon Entropy, E: Innovation, F: Citations per paper. Horizontal axes show years after the first superstar publication (A, B), years after the inspired paper's publication (C), and the inspiration groups (D-F). Curves indicate averages, shaded areas 95% confidence intervals.

We plot the results of our analysis in Fig. 2. In terms of the temporal evolution of the Shannon entropy, while there is a monotonic increase, reflecting an increase in the body of knowledge with time (Fig. S8), we find little-to-no differences across the groups, as seen in Fig. 2A. Averaging over the entire temporal range also indicates a flat trend (Fig. 2D). Similar trends are seen for the reference diversity, both in terms of its temporal evolution (upper panels of Fig. S9A,B) and its temporally averaged values (lower panels). Unlike the entropy or reference diversity, there is a decreasing trend in time for the citation diversity. We observe a 5% decrease in the measure between those in the top 10% as compared to the bottom 50%. Figure 2B,E indicates the same trend for Innovation, which also decreases in time across all groups, reflecting a saturation in the number of combinations of new terms that are combined by authors as their careers progress. The difference between the top and bottom groups is now around 15%. Finally, citations to papers experience an initial boost and then decrease in time, as seen in Fig. 2C, with now much clearer differences between the groups. Indeed, there is a 40% difference in citations per paper between the most and least inspired groups, as seen in Fig. 2F.
In terms of redundancy, in Fig. S9C we plot the cosine similarity (Eq. (4)). As the figure indicates, across all groups there is a decreasing trend in the temporal evolution of the similarity, yet a clear difference exists, whereby papers published by the top 10% are on average 8% more similar to each other in terms of content when compared to the bottom 50%. Taken together, the results indicate that groups of authors who cite superstar papers often do get a citation boost as compared to other sets of authors. However, their output is only modestly more innovative and equally novel compared to the rest of the corpus. Rather, their content is more redundant than that of the remaining sets of authors.

FIG. 3. Novelty and Innovation statistics at the individual author level. A: Reference Diversity, B: Citation Diversity, C: Shannon Entropy, D: Innovation, E: Average citation count, F: Average publication count. The horizontal axis is the percent of an author's papers that cite a superstar; curves compare all papers against papers excluding superstar collaborations.

Next, we dis-aggregate the group-level results and examine the degree of superstar influence at the individual author level. In Fig. 3 we plot the averages of the novelty and innovation metrics, as well as the citation and publication counts, across authors as a function of the fraction of their papers that cite superstars. Given that many authors co-publish with superstars, the blue curve indicates the results when including such papers, while the orange curve shows the results excluding these papers. Figures 3A-C indicate that as authors cite more superstars they experience an increase in reference and citation diversity, as well as the Shannon entropy, irrespective of whether one includes their collaborations with superstars. While we see no indications of novelty of content being driven by superstar influence at the group level, at the individual level the benefits are clear.
On the other hand, when looking at Innovation (Fig. 3D), the trend is flat when including all papers and decreasing when co-authored publications are excluded. Indeed, it appears that the more authors cite superstars, the less innovative their own publications (i.e. those not co-authored with a superstar) become. The benefit of collaborating with a superstar becomes even more apparent when looking at citations (Fig. 3E) and the number of publications (Fig. 3F). For the former, when including collaborations there is a dramatic benefit in terms of garnered citations (approximately 67% more citations on average) that drops considerably when excluding collaborations. Indeed, the citation benefit appears to be driven primarily by being collaborators of superstars, who by definition have the largest number of citations to their papers. The same appears to be the case for the latter, with the number of publications increasing when including collaborations and decreasing when they are excluded.

E. Early Collaborators and Early Innovators
The results thus far provide evidence for academics inspired by superstars producing output with diverse content that receives visibility via citations, while not necessarily being innovative in the sense of tying together new concepts. On the other hand, there is also evidence that these features are significantly boosted by direct collaboration with superstars, and that when left to their own devices their publication output, novelty and innovation are lower than the rest of the corpus. Indeed, it begs the question of whether superstars foster independent individual success, or rather inhibit it?
For instance, as shown, at the aggregate level the group of authors that cite superstars the most often tend to publish on mostly the same topics. To further probe this, we restrict our analysis to early-career scientists. Given that findings from prior studies have shown that collaboration with successful scientists provides a boost for early-career researchers [16], and that early success generates a cumulative advantage of long-term career success [14], we define early collaborators as those authors who collaborate with superstars in at least half of their papers in the first five years of their career. As a point of comparison, we define another set of authors who do not collaborate with, or cite, superstar papers, but are in the top 10% of the corpus in terms of Innovation as measured by their first five years of publications. We term these authors early innovators. We use innovation as a metric, given that this is the measure by which superstars outperform other academics the most (Fig. 1D) and therefore might serve as a robust indicator of academic potential.
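The two cohorts reduce to simple selection rules over an author's first five career years. The sketch below is one possible reading of those rules with hypothetical per-author fields; it is not the authors' code.

    def classify_author(author, innovation_cutoff):
        # Label an author as an early collaborator, an early innovator, or neither.
        # `author` is a dict with (hypothetical) keys:
        #   n_early_papers            - papers in the first five career years
        #   n_early_superstar_coauth  - of those, papers co-authored with a superstar
        #   cites_superstars_early    - whether any early paper cites a superstar
        #   early_innovation          - summed Eq. (3) score over the early papers
        # `innovation_cutoff` is the corpus-wide top-10% innovation threshold.
        if author["n_early_superstar_coauth"] >= 0.5 * author["n_early_papers"]:
            return "early collaborator"
        if (author["n_early_superstar_coauth"] == 0
                and not author["cites_superstars_early"]
                and author["early_innovation"] >= innovation_cutoff):
            return "early innovator"
        return "other"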
FIG. 4. Citations and Innovation for frequent collaborators and early innovators. A: Citations per paper when including superstar papers, B: the same when excluding superstar papers, C: temporal evolution of Innovation, D: the same when excluding superstar papers. The horizontal axis t − t0 indicates the time elapsed from t0, the time of first publication for authors in either group.

For academics in each group we track the temporal evolution of the citations per paper, the number of publications, as well as the Innovation, measured from the date of first publication t0 for authors in either group. Early collaborators get more citations per paper (Fig. 4A) and publish more than early innovators (Fig. S10A), particularly within the first ten years of their career. However, when one removes superstar publications, the trend reverses: early innovators now publish more (Fig. S10B) and garner a comparable rate of citations to the other group (Fig. 4B).
Additionally, the early innovators maintain a higher degree of Innovation throughout their careers as compared to early collaborators (Fig. 4C, D), with or without including collaborations with superstars. Thus the evidence suggests that while early-career scientists indeed get a boost from collaborating with superstars, their own academic output is less innovative and equally visible in terms of citations, as compared to other early-career scientists who produce innovative output without the benefit of such collaborations.

III. CONCLUSION AND DISCUSSION
In the exponentially growing knowledge base of academia, in which visibility and funding are increasingly biased towards top academics and institutions, we examine the influence that superstar academics have on the community as a whole and in terms of novelty and career success. Superstars provide an irreplaceable source of novel ideas and contributions at rates that exceed those of other academics in the corpus; our metrics support that their accolades are well deserved and should be rewarded as such. We find superstars are highly novel and inspire a higher diversity of concepts among their followers and collaborators. However, they do inhibit innovation potential. Those academics most inspired by a superstar are individually more diverse in their papers, but at the group level add little more intrinsic novelty than groups more weakly inspired by the superstar, even though they achieve higher citations. Additionally, we find indications of a strong Matthew Effect, whereby academics who cite a superstar highly receive higher citations when collaborating with the superstar than without, despite higher gains in concept diversity than their academic counterparts.
Though collaboration with successful academics can stimulate a successful career path, we find these collaborations can stifle innovation and may not provide the best indicator of long-term independent career success. Collaboration is a requirement for tackling increasingly difficult interdisciplinary problems. Superstars are well positioned to foster interdisciplinary research efforts by supporting early-career researchers. Although the latter receive a citation boost when collaborating with a superstar, this does not imply that they are developing more novel work than their colleagues who are less connected to top academics. In fact, our results indicate that those closest to a superstar show the lowest innovation potential. This is slightly surprising given that the literature has shown that junior researchers who collaborate with superstars are more likely to publish in high-quality journals and have increased chances of engaging in high-quality research with other top scientists. On balance, however, we find that this does not stimulate long-term independent career success. This could be an indication of individuals getting lost in the wake of a superstar, meaning these researchers “bandwagon” off the ideas and visibility of their respective superstars and iterate on the superstar’s work. Although there is value in iterating upon already developed research questions, this may not foster innovative work or stimulate individual careers. Indeed, it has very recently been shown that there is a decline in disruptive ideas in both scientific publications and patents [50]. The authors attribute this to an ever-increasing reliance on a narrower set of extant scientific knowledge on which to build ideas, a finding very much in line with our observation that followers of superstars produce redundant and less innovative content as a group.
The observed effects could be a consequence of superstars’ strong hold over their respective fields. It has been shown that paradigm shifts in thinking occur after the sudden deaths of superstars. Collaborators of superstars suffer a drop in publication rate after their superstar’s death, and the field may experience a surge of contributions by outsiders who are disproportionately likely to be highly cited [51]. One can infer that collaborators of superstars are successful because they are collaborating with superstars. Care should be taken when considering these protégés themselves for matters of funding and academic hiring. If the goal is to foster highly novel work, elements outside of prestige and social connection, such as efficacy, equity, and innovation, should be considered.

Our findings are not limited solely to early innovators, collaborators, and inspirees. Though we provide early innovators as an example, many other groups [52] can be isolated and studied in the way we have done here to identify promising academics based on early signatures of novelty or a range of social parameters. We outlined multiple different definitions of novelty in the introduction which we have not further developed in this study. Implementing the different definitions and distinguishing different types of novelty can elucidate what types of novelty are stifled or enhanced by different social configurations.

A subject that we have not probed but that is directly relevant to our discussion is the matter of funding. In recent times, funding has become increasingly biased towards top institutions [53], with 90% of NSF funding in 2018 going to 22% of funded institutions, serving 43% of all institutions and 34% of underrepresented minorities [54].
This is coupled with a history of funding disparities with respect to race and underrepresented communities [55–57]. Additionally, underrepresented groups produce novel work at higher rates, yet their contributions are taken up by other scholars at lower rates than novel contributions by gender and racial majorities [46]. Equitable funding programs have been shown to enhance research infrastructure, investigator capabilities, and intra- and inter-university collaborations at less prominent institutions [58]. As we have shown, those who are least influenced by superstars innovate the most and consequently have higher citation rates. We believe that coupling these results with added attention to equitable funding practices [59] will reduce the growing inequality in academia and stimulate novel and innovative research.

Finally, we note that our investigation necessarily comes with limitations. Given our sole focus on the APS body of literature, one should be careful in extrapolating these findings to other academic disciplines. Ours is also an incomplete subset of the entire journal corpus, so a full corpus with a complete citation network would give a more accurate picture.

[1] Cliff, H. How to Make an Apple Pie from Scratch: In Search of the Recipe for Our Universe (Picador, London, 2021).
[2] McAndrew, S. & Everett, M. Music as collective invention: A social network analysis of composers. Cultural Sociology 9, 56–80 (2014). URL https://doi.org/10.1177/1749975514542486.
[3] Muller, E. & Peres, R. The effect of social networks structure on innovation performance: A review and directions for research. International Journal of Research in Marketing 36, 3–19 (2019). URL https://www.sciencedirect.com/science/article/pii/S0167811618300284.
[4] Hazarie, S., Barbosa, H., Frank, A., Menezes, R. & Ghoshal, G. Uncovering the differences and similarities between physical and virtual mobility. Journal of The Royal Society Interface 17, 20200250 (2020). URL https://doi.org/10.1098/rsif.2020.0250.
[5] Chen, Z. et al. Contrasting social and non-social sources of predictability in human mobility. Nature Communications 13, 1922 (2022). URL https://doi.org/10.1038/s41467-022-29592-y.
[6] Rodriguez, N., Ahn, Y.-Y. & Bollen, J. Collective dynamics of belief evolution under cognitive coherence and social conformity. PLoS ONE 11, e0165910 (2016).
[7] Holme, P. & Ghoshal, G. Dynamics of networking agents competing for high centrality and low degree. Physical Review Letters 96, 098701 (2006). URL https://link.aps.org/doi/10.1103/PhysRevLett.96.098701.
[8] Ghoshal, G. & Newman, M. E. J. Growing distributed networks with arbitrary degree distributions. The European Physical Journal B 58, 175–184 (2007). URL https://doi.org/10.1140/epjb/e2007-00208-2.
[9] Recuero, R., Zago, G. & Soares, F. Using social network analysis and social capital to identify user roles on polarized political conversations on twitter. Social Media + Society 5, 2056305119848745 (2019). URL https://doi.org/10.1177/2056305119848745.
[10] Dubois, E. & Gaffney, D. The multiple facets of influence: Identifying political influentials and opinion leaders on twitter. American Behavioral Scientist 58, 1260–1277 (2014). URL https://doi.org/10.1177/0002764214527088.
[11] Radicchi, F., Weissman, A. & Bollen, J. Quantifying perceived impact of scientific publications. Journal of Informetrics 11, 704–712 (2017). URL https://www.sciencedirect.com/science/article/pii/S1751157717300846.
[12] Hirsch, J. E. An index to quantify an individual’s scientific research output. Proceedings of the National Academy of Sciences 102, 16569–16572 (2005).
[13] Nielsen, M. W. & Andersen, J. P. Global citation inequality is on the rise. Proceedings of the National Academy of Sciences 118, e2012208118 (2021).
[14] Merton, R. K. The Matthew effect in science. Science 159, 56–63 (1968).
[15] Runco, M. & Pritzker, S. Encyclopedia of Creativity (Elsevier Science, 2011).
[16] Li, W., Aste, T., Caccioli, F. & Livan, G. Early coauthorship with top scientists predicts success in academic careers. Nature Communications 10, 5170 (2019).
[17] Sekara, V. et al. The chaperone effect in scientific publishing. Proceedings of the National Academy of Sciences 115, 12603–12607 (2018).
[18] Xie, Q., Zhang, X., Kim, G. & Song, M. Exploring the influence of coauthorship with top scientists on researchers’ affiliation, research topic, productivity, and impact. Journal of Informetrics 16, 101314 (2022). URL https://www.sciencedirect.com/science/article/pii/S1751157722000669.
[19] Abrahamson, E. & Rosenkopf, L. Social network effects on the extent of innovation diffusion: A computer simulation. Organization Science 8, 289–309 (1997). URL http://www.jstor.org/stable/2635149.
[20] Azoulay, P., Graff Zivin, J. S. & Wang, J. Superstar extinction. The Quarterly Journal of Economics 125, 549–589 (2010). URL https://doi.org/10.1162/qjec.2010.125.2.549.
[21] Clauset, A., Arbesman, S. & Larremore, D. B. Systematic inequality and hierarchy in faculty hiring networks. Science Advances 1, e1400005 (2015). URL https://doi.org/10.1126/sciadv.1400005.
[22] Janosov, M., Battiston, F. & Sinatra, R. Success and luck in creative careers. EPJ Data Science 9, 9 (2020). URL https://doi.org/10.1140/epjds/s13688-020-00227-w.
[23] Bol, T., de Vaan, M. & van de Rijt, A. The Matthew effect in science funding. Proceedings of the National Academy of Sciences 115, 4887–4890 (2018). URL https://www.pnas.org/doi/abs/10.1073/pnas.1719557115.
[24] Petersen, A. M., Jung, W.-S., Yang, J.-S. & Stanley, H. E. Quantitative and empirical demonstration of the Matthew effect in a study of career longevity. Proceedings of the National Academy of Sciences 108, 18–23 (2011). URL https://www.pnas.org/doi/abs/10.1073/pnas.1016733108.
[25] Lazer, D. & Friedman, A. The network structure of exploration and exploitation. Administrative Science Quarterly 52, 667–694 (2007).
[26] Rodan, S. & Galunic, C. More than network structure: How knowledge heterogeneity influences managerial performance and innovativeness. Strategic Management Journal 25, 541–562 (2004). URL http://www.jstor.org/stable/20142143.
[27] Chang, M. & Harrington, J. E. Discovery and diffusion of knowledge in an endogenous social network. American Journal of Sociology 110, 937–976 (2005). URL http://www.jstor.org/stable/10.1086/426555.
[28] Trapido, D. How novelty in knowledge earns recognition: The role of consistent identities. Research Policy 44, 1488–1500 (2015). URL https://www.sciencedirect.com/science/article/pii/S0048733315000839.
[29] Xu, F. & Evans, J. Flat teams drive scientific innovation. Proceedings of the National Academy of Sciences 119 (2022).
[30] Hirsch, J. E. Does the h-index have predictive power? Proceedings of the National Academy of Sciences 104, 19193–19198 (2007).
[31] Hirsch, J. E. An index to quantify an individual’s scientific research output. Proceedings of the National Academy of Sciences 102, 16569–16572 (2005). URL https://www.pnas.org/doi/abs/10.1073/pnas.0507655102.
[32] American Physical Society. https://journals.aps.org/datasets.
[33] Richardson, L. https://sethc23.github.io/wiki/Python/Beautiful_Soup_Documentation.pdf.
[34] Caron, E. & van Eck, N.-J. Large scale author name disambiguation using rule-based scoring and clustering. In Noyons, E. (ed.) Proceedings of the Science and Technology Indicators Conference 2014, 79–86 (Universiteit Leiden, 2014). URL http://sti2014.cwts.nl.
[35] El-Kishky, A., Song, Y., Wang, C., Voss, C. R. & Han, J. Scalable topical phrase mining from text corpora. Proc. VLDB Endow. 8, 305–316 (2014). URL https://doi.org/10.14778/2735508.2735519.
[36] Lee, S. Y. Gibbs sampler and coordinate ascent variational inference: A set-theoretical review. Communications in Statistics - Theory and Methods 51, 1549–1568 (2022). URL https://doi.org/10.1080/03610926.2021.1921214.
[37] Mimno, D., Wallach, H., Talley, E., Leenders, M. & McCallum, A. Optimizing semantic coherence in topic models. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, 262–272 (Association for Computational Linguistics, Edinburgh, Scotland, UK, 2011). URL https://aclanthology.org/D11-1024.
[38] Ouafae, B., Oumaima, L., Mariam, R. & Abdelouahid, L. Novelty detection review: state of art and discussion of new innovations in the main application domains. In 2020 1st International Conference on Innovative Research in Applied Science, Engineering and Technology (IRASET), 1–7 (2020).
[39] Soboroff, I. & Harman, D. Overview of the TREC 2003 novelty track. In Voorhees, E. M. & Buckland, L. P. (eds.) Proceedings of The Twelfth Text REtrieval Conference, TREC 2003, Gaithersburg, Maryland, USA, November 18-21, 2003, vol. 500-255 of NIST Special Publication, 38–53 (National Institute of Standards and Technology (NIST), 2003). URL http://trec.nist.gov/pubs/trec12/papers/NOVELTY.OVERVIEW.pdf.
[40] Ghosal, T., Saikh, T., Biswas, T., Ekbal, A. & Bhattacharyya, P. Novelty detection: A perspective from natural language processing. Computational Linguistics 48, 77–117 (2022). URL https://doi.org/10.1162/coli_a_00429.
[41] Uzzi, B.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Mukherjee, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Stringer, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Jones, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Atypical combinations and scientific impact.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Science 342, 468–472 (2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/doi/abs/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1126/science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' 1240474.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/doi/pdf/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1126/science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1240474.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [42] Schumpeter, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The theory of economic development: An inquiry into profits, capital, credit, interest, and the business cycle (Theorie der wirtschaftlichen Entwicklung) (Transaction, Edi- son, NJ, 1934).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Translated by Redvers Opie.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [43] Cover, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Thomas, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Elements of Information Theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Wiley Series in Telecommunica- tions and Signal Processing (Wiley-Interscience, New York, New York, USA, 2006).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [44] Aral, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Dhillon, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' What (exactly) is novelty in networks?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' unpacking the vision ad- vantages of brokers, bridges, and weak ties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Institute for Operations Research and the Man- agement Sciences (INFORMS) (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL http://dx.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='2139/ssrn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='2388254.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' https://ssrn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='com/abstract=2388254.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [45] Kuhn, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The Structure of Scientific Revolutions (University of Chicago Press, Chicago, 20 1962).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [46] Hofstra, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' The diversityx2013;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='innovation paradox in science.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Proceedings of the National Academy of Sciences 117, 9284–9291 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='pnas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/doi/abs/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' 1073/pnas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1915378117.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='pnas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/doi/pdf/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1073/pnas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1915378117.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [47] Baten, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Creativity in temporal social networks: how divergent thinking is impacted by one’s choice of peers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Journal of The Royal Society Interface 17, 20200667 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [48] Baten, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Aslin, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Ghoshal, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Hoque, E.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Cues to gender and racial identity reduce creativity in diverse social networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Scientific Reports 11, 10261 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL https: //doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1038/s41598-021-89498-5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [49] Baten, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Aslin, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Ghoshal, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Hoque, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Novel idea generation in social networks is optimized by exposure to a “goldilocks” level of idea-variability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' PNAS Nexus 1, pgac255 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [50] Park, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Leahey, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Funk, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Papers and patents are becoming less disruptive over time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Nature 613, 138–144 (2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1038/s41586-022-05543-x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [51] Azoulay, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Fons-Rosen, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Graff Zivin, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Does science advance one funeral at a time?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' American Economic Review 109, 2889–2920 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='aeaweb.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/ articles?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='id=10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1257/aer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20161574.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [52] He, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Ding, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Tang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', Reguramalingam, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Bollen, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Mining diversity subgraph in mul- tidisciplinary scientific collaboration networks: A meso perspective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Journal of Informetrics 7, 117–128 (2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [53] Murray, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Bias in research grant evaluation has dire consequences for small universi- ties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' PLOS ONE 11, 1–19 (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1371/journal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='pone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='0155876.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [54] of Government Affairs, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Building america’s stem workforce: Eliminating barriers and un- locking advantages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Tech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Rep.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=', American Physical Society, 1 Physics Ellipse, College Park, MD 20740-3844 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [55] Woodson, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' & Boutilier, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Impacts for whom?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Assessing inequalities in NSF- funded broader impacts using the Inclusion-Immediacy Criterion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Science and Pub- lic Policy 49, 168–178 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1093/scipol/scab072.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' https://academic.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='oup.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='com/spp/article-pdf/49/2/168/43395599/scab072.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [56] Chen, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Decades of systemic racial disparities in funding rates at the national science 21 foundation (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' URL osf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='io/xb57u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [57] Ginther, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Race, ethnicity, and nih research awards.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Science (New York, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=') 333, 1015–9 (2011).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [58] Harris, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Established program to stimulate competitive research (epscor): Background and selected issues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Tech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Rep.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' R44689, Congressional Research Service, 1 Physics Ellipse, College Park, MD 20740-3844 (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' [59] Bollen, J.' 
Supplementary Information

Creativity and Production in Academic Social Networks
Sean Kelty, Raiyan Abdul Baten, Adiba Proma, Ehsan Hoque, Johann Bollen, Gourab Ghoshal

CONTENTS
S1. Data
    S1.1. Summary statistics
    S1.2. Author Disambiguation
    S1.3. Coherence of Topic Model
    S1.4. Example of Topic Representation
    S1.5. Innovation Example
S2. Trends of Novelty and Innovation
S3. Author and Paper-Level Novelty and Output

S1. DATA

S1.1. Summary statistics

TABLE S1: Summary of citation statistics of the corpus with and without abstracts.
    No. DOIs: 678,916
    No. DOIs (w/ abstracts): 250,628
    No. Authors (after disambiguation): 307,894
    No. Superstars: 303
    h-index cutoff for superstars: 21
    Avg. h-index: 1.74
    Avg. No. References per paper: 13.5
    Avg. No. References per paper (w/ abstracts): 5.57
    Avg. No. Citations per paper: 14.4
    Avg. No. Citations per paper (w/ abstracts): 6.88

FIG. S1. Upper panel: Proportion of analyzed papers across APS journals. Lower panel: Count of all papers in each journal. (Journals shown: PRA, PRAB, PRAPPLIED, PRB, PRC, PRD, PRE, PRFLUIDS, PRL, PRMATERIALS, PRPER, PRRESEARCH, PRSTAB, PRSTPER, PRX, PRXQUANTUM, Physical Review, RMP.)

FIG. S2. Citation and h-index distributions. Left: Distribution of citations for all papers (orange curve) and for papers with extracted abstracts (blue curve).
Right: Distribution of the h-index across all authors in the corpus.

S1.2. Author Disambiguation

We use a scoring-based method to disambiguate authors in our dataset. The following criteria contribute to the score (a minimal sketch of how they might be combined is given after the list):

1. Initials
   Two initials: 5
   More than 2 initials: 10
   Conflicting initials: -10
2. First name
   General name: 3
   Non-general name: 6
   (A name is considered general if it has been seen more than 1000 times.)
3. Address/Affiliation
   Country, city: 4
   Country, city, organization: 7
   Country, city, organization, department: 10
4. Shared co-authors
   One: 4
   Two: 7
   More than 2: 10
5. Source journal: 6
6. Self-citation: 10
7. Bibliographic coupling (two works referencing a common third work)
   One: 2
   Two: 4
   Three: 6
   Four: 8
   More than four: 10
8. Co-citation (number of times a third work has cited two works)
   One: 2
   Two: 3
   Three: 4
   Four: 5
   More than 4: 6
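To make the scoring concrete, the sketch below shows one way a pair of author mentions could be compared under the point scheme above. It is only an illustration: the record fields, the stand-in set of "general" first names, and the merge threshold are assumptions rather than details taken from the paper; only the point values mirror the list.

# Minimal, hypothetical sketch of the pairwise disambiguation score.
# Record fields (initials, first_name, affil, coauthors, journal, doi,
# references, cited_by) and the constants below are illustrative assumptions;
# coauthors, references, and cited_by are assumed to be Python sets.

GENERAL_FIRST_NAMES = {"wei", "jun", "maria"}  # stand-in for names seen >1000 times
MERGE_THRESHOLD = 20                           # assumed cutoff for treating two mentions as one author

def author_pair_score(a, b):
    score = 0

    # 1. Initials: matching initials add evidence, conflicting initials subtract it.
    n = min(len(a["initials"]), len(b["initials"]))
    if a["initials"][:n] != b["initials"][:n]:
        score -= 10
    elif n > 2:
        score += 10
    elif n == 2:
        score += 5

    # 2. First name: a shared rare first name is stronger evidence than a common one.
    if a["first_name"] and a["first_name"] == b["first_name"]:
        score += 3 if a["first_name"] in GENERAL_FIRST_NAMES else 6

    # 3. Address/affiliation: deeper matches (country -> department) score higher.
    depth = 0
    for level in ("country", "city", "organization", "department"):
        if a["affil"].get(level) and a["affil"].get(level) == b["affil"].get(level):
            depth += 1
        else:
            break
    score += {2: 4, 3: 7, 4: 10}.get(depth, 0)

    # 4. Shared co-authors.
    shared = len(a["coauthors"] & b["coauthors"])
    score += {0: 0, 1: 4, 2: 7}.get(shared, 10)

    # 5. Same source journal.
    if a["journal"] == b["journal"]:
        score += 6

    # 6. Self-citation: one paper cites the other.
    if a["doi"] in b["references"] or b["doi"] in a["references"]:
        score += 10

    # 7. Bibliographic coupling: shared references (2 points each, capped at 10).
    score += min(2 * len(a["references"] & b["references"]), 10)

    # 8. Co-citation: papers citing both works (2, 3, 4, 5, then capped at 6).
    cocited = len(a["cited_by"] & b["cited_by"])
    score += min(cocited + 1, 6) if cocited else 0

    return score

# Two mentions would then be merged when author_pair_score(a, b) >= MERGE_THRESHOLD.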
S1.3. Coherence of Topic Model

We apply the UMass coherence measure to determine a stable number of topics for our topic model. This coherence score measures how similar the top words in a topic are to each other. We aim for the highest possible coherence value that stabilizes in a neighborhood of the number of topics k. Fig. S3 shows the coherence stabilizing at roughly k = 25 topics.

FIG. S3. Coherence Scores of P-LDA Topic Model (UMass coherence as a function of the number of topics k, for k up to 100).
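For reference, the UMass coherence used above is, in the standard formulation introduced by Mimno et al., computed from document co-occurrence counts of a topic's top words. The notation below assumes that standard definition; the supplement itself does not spell out the formula.

C_{\mathrm{UMass}}\left(t; V^{(t)}\right) = \sum_{m=2}^{M} \sum_{l=1}^{m-1} \log \frac{D\left(v^{(t)}_m, v^{(t)}_l\right) + 1}{D\left(v^{(t)}_l\right)}

where V^{(t)} = (v^{(t)}_1, \dots, v^{(t)}_M) are the M most probable terms of topic t, D(v) is the number of documents containing term v, and D(v, v') is the number of documents containing both terms; larger (less negative) values indicate more coherent topics.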
S1.4. Example of Topic Representation

Words and phrases in the corpus, which will generally be referred to as "terms", are represented by a distribution over latent topics, given by the frequency of topic assignments of the term over the entire corpus. Topics, in turn, are characterized by the frequency of the terms associated with them. For each topic, all terms are ranked by the relative weight of that topic within each term's own distribution. For example, if a phrase had a topic distribution over k = 3 topics of [0.1, 0.2, 0.7], the phrase is representative of topic 3. Terms are pre-processed by removing stop words and by stemming, so that conjugated versions of the same word are represented as the same word. A toy sketch of this ranking follows, and Table S2 lists the most representative terms per topic.
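As an illustration of the ranking just described, the toy example below assigns each term to its highest-weight topic and ranks terms within a topic by the weight that topic receives in each term's distribution. The term names and probability values are invented for the example; only the procedure follows the description above.

# Toy illustration of term-topic representation and ranking.
# The term -> distribution table is made up; distributions are over k = 3 topics.
import numpy as np

term_topic = {
    "spin texture": np.array([0.10, 0.20, 0.70]),
    "dark matter":  np.array([0.05, 0.80, 0.15]),
    "ion trap":     np.array([0.60, 0.10, 0.30]),
}

def representative_topic(dist):
    """Topic the term is most representative of (1-indexed, as in the text)."""
    return int(np.argmax(dist)) + 1

def top_terms_for_topic(term_topic, k, n=2):
    """Rank terms for topic k by the weight of topic k in each term's own distribution."""
    ranked = sorted(term_topic.items(), key=lambda item: item[1][k - 1], reverse=True)
    return [term for term, _ in ranked[:n]]

print(representative_topic(term_topic["spin texture"]))  # -> 3, matching the [0.1, 0.2, 0.7] example
print(top_terms_for_topic(term_topic, k=3))              # -> ['spin texture', 'ion trap']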
TABLE S2: Topic Model Summary of most representative terms per topic.

Topic 1: crystal film, ultrathin, mtj, stm tip, stack, freestand, high resolution angle, franz, stm, force micrscop
Topic 2: center cubic fcc, temperature addit, measur x, tc cuprat, temperature down k, temperature k k, tc k, tc superconduct, tc superconductor, temperature tc k
Topic 3: spectral line, ωp, raman line, absorpt part, absorpt line, nd3, electroreflect, eliashberg, b1g, endor
Topic 4: axial magnet, spin angular, moment inertia, moment magnet, parallel magnet field, magnet revers, torqu, interlay exchange, spin texture, moriya
Topic 5: collim, electron eject, ion yield, ion trap, n4, ion produc, ion plasma, damag, wall carbon, electron drift
Topic 6: cauchi, broken time, takahashi, hamilton jacobi, symmetri spontan, tachyon, ward ident, polyakov, loop quantum cosmolog, coulomb guage
Topic 7: excitatori, hub, infect, epidem, volatil, exactli solvabl model, network model, synaps, synapt, integr fire
Topic 8: nonequilibrium phase transit, first order phase transit, j', glass order, thouless transit, glass like, glass former, triangluar lattic, nearest neighbor coupl, nearest neighbor distanc
Topic 9: magnitude higher, larg part, fourth gener, even though, order qcd, select rule, third, mach zehnder interferomet, even larger, order raman
Topic 10: quasilinear, langevin equat, gilbert equat, equate state eo, sand, attractor, classic chaotic, eulerian, chimera state, euler equat
Topic 11: advanc ligo, mit bag, catalog, model background, dark sector, dark matter, sight, model dark, sky, sno
Topic 12: nest, der waal force, nodal line, helic edg, non fermi, state degeneraci, hove, majorana zero, majorana bound, sdh
Topic 13: three dimension 3d, basin attract, fuld ferrel, dimension squar, lz, trap bose, bodi effect, bodi forc, hard core boson, fermion atom
Topic 14: highest occupi molecular, muffin tin orbit, gaas1, clathrat, cl2, cl, hexagon boron, interstiti, gell, ci
Topic 15: puls width, optic parametr, sapphir laser, exciton biexciton, optic pump, harmon gener shg, optic puls, inxga1 xa, optic nonlinear, ultrastrong
Topic 16: clauser, horn shimoni holt, simpl analyt express, us deriv, part paper, analyt formula, cb, exact forumla, exact expression, pauli exclus
Topic 17: agre reason, foudn good agreement, recent experiment data, find excel agreement, find good agreement, theoret data, theoret cross, reason agreement experiment, found excel agreement, good agreement experimental result
Topic 18: qutrit, regist, processor, studi entagle, protocol,
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' markovian dynam,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' purif,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' decoy state,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' qkd,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' error correct Topic 19 nucleon nucleon scatter,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' deep inelast scatter,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' total angular momentum,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' inclus cross,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' transfer cross section,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' multifragment,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' multiperipher,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' depend cross section,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' forward angle,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' πn Topic 20 full potenti linear augment plane wave,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' wave born,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' wannier function,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' impuls,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' infield,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' use path,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' use mont,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' within densiti function,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' jastrow,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' use harte Topic 21 avoid walk,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' nonergod,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' time τ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' time tail,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' time t2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' time t2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' dimension diffus,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' time random,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' nonex- ponenti,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' msd Topic 22 even even nuclei,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' xe136,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' rich nuclei,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' gt,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' v’,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' p0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' cf252,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' α p,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' α reaction,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' p1 Topic 23 director field,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' shear modulu,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' homeotrop,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' tλ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' antivortex,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' humid,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' u0,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' hydrophil,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' shear band,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' shear strain S-7 Topic 24 signific role,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' key role,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' kibbl zurek,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' amino acid,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' play essenti,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' play domin,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' play crucial,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' play critical,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' play central,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' remain elus Topic 25 paramet η,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' ev2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' rev c,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' rev lett,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' eq,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' right hand,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' right left,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' e e collide,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' e e annhil,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' f0 TABLE S3: Example of Phrase-Topic Distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Term Topic-Embedding .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='..' 
Term              Topic-Embedding
...
Quantiz           [1, 0, 0, 0, 0, 2259, 0, 0, 560, 0, 0, 882, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 677, 0, 0]
Quantum           [29, 0, 0, 21, 0, 4304, 1069, 4276, 0, 308, 0, 6008, 454, 46, 14920, 0, 0, 35931, 0, 1828, 0, 0, 1384, 7, 1]
Quark             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14542]
Quarkonia         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 125]
Quarkonium        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 299]
Quarter           [0, 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, 321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
...
Quantum Wire      [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 342, 0, 0, 292, 0, 0, 23, 0, 0, 0, 0, 91, 0, 0]
Quantum Zeno      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0]
Quark Antiquark   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 433]
Quark Condens     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 107]
Quark Decay       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25]
...
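The rows above are raw phrase-topic co-occurrence counts over the 25 topics listed earlier. A minimal Python sketch of reading one such row as a normalized distribution is given below; the term subset and helper names are illustrative assumptions, not the paper's actual code.

# Minimal sketch: normalize a raw phrase-topic count row from Table S3 into a
# probability distribution and read off its dominant topic. Names are illustrative.
counts = {
    "Quantum": [29, 0, 0, 21, 0, 4304, 1069, 4276, 0, 308, 0, 6008, 454, 46,
                14920, 0, 0, 35931, 0, 1828, 0, 0, 1384, 7, 1],
    "Quark Antiquark": [0] * 24 + [433],
}

def topic_distribution(row):
    """Raw counts over the 25 topics -> normalized topic distribution."""
    total = sum(row)
    return [c / total for c in row] if total else row

def dominant_topic(row):
    """1-based index of the topic with the largest count."""
    return max(range(len(row)), key=row.__getitem__) + 1

for term, row in counts.items():
    k = dominant_topic(row)
    print(f"{term}: Topic {k}, weight {topic_distribution(row)[k - 1]:.2f}")
# "Quantum" peaks at Topic 18 (the quantum-information phrase list above).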
S1.5. Innovation Example

The innovation metric counts the first time a term, or a new combination of terms, has been seen in an article over the entire corpus. Fig. S4 shows the introduction of the terms "quantum" and "cosmolog" in the corpus. Note that "cosmolog" is the root of words such as "cosmology" and "cosmological" that were lemmatized in pre-processing. We plot the frequency of the terms over time, together with vertical lines marking the first year each term has been seen. We also plot the counts of the phrase "quantum cosmolog", which is an additionally considered term in our topic model.

[FIG. S4 panels: Yearly Counts vs. Year and Density vs. Year for "Quantum", "Cosmolog", and "Quantum Cosmolog".]
FIG. S4. Example of innovation measure with terms "quantum" and "cosmolog".
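As a rough illustration of this first-appearance counting, the following Python sketch computes the introduction year and the yearly mention counts of each term over a toy corpus; the (year, term-list) record format and all names are assumptions made here for illustration, not the authors' pipeline.

from collections import Counter

# Toy corpus of (publication year, list of stemmed terms/phrases) records.
# This record format is an assumption for illustration only.
corpus = [
    (1921, ["quantum", "atom spectra"]),
    (1927, ["quantum", "wave mechan"]),
    (1967, ["cosmolog", "expand univers"]),
    (1971, ["quantum", "cosmolog", "quantum cosmolog"]),
    (1995, ["quantum cosmolog", "inflat"]),
]

def first_appearance_years(corpus):
    """Map each term to the first year it is seen anywhere in the corpus."""
    first_seen = {}
    for year, terms in sorted(corpus):          # scan chronologically
        for term in terms:
            first_seen.setdefault(term, year)   # keep only the earliest year
    return first_seen

def yearly_counts(corpus, term):
    """Number of articles mentioning `term` per year (cf. the yearly-count panel of Fig. S4)."""
    return dict(sorted(Counter(year for year, terms in corpus if term in terms).items()))

firsts = first_appearance_years(corpus)
print(firsts["quantum cosmolog"])        # first introduction of the phrase: 1971 here
print(yearly_counts(corpus, "quantum"))  # {1921: 1, 1927: 1, 1971: 1}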
S2. TRENDS OF NOVELTY AND INNOVATION

FIG. S5. Correlations between all novelty and innovation measures based on Pearson's r.
[FIG. S5 panels ("Correlation of Novelty Measures"): pairwise scatter plots of Innovativeness, Reference Diversity, and Citation Diversity, each annotated with its R^2 value.]
S3. AUTHOR AND PAPER-LEVEL NOVELTY AND OUTPUT

[FIG. S6 panels (A)-(D) ("Author-Level Novelty Scores vs Author Success"): author-level novelty scores I(R), I(C), I(S), and I(I) vs. number of author publications, for all papers and without superstar papers.]
FIG. S6. Novelty and innovation metrics of an author's publication record as a function of their number of publications. Authors with between 1-50 publications in the corpus have been considered.

[FIG. S7 panels (A)-(D) ("Paper-Level Novelty Scores vs Paper Success"): paper-level novelty scores I(R), I(C), I(S), and I(I) vs. number of citations, for all papers and without SS papers.]
FIG. S7. Novelty and innovation metrics of an author's publication record as a function of the number of citations their papers garner.

[FIG. S8 panels ("Average Novelties per Year"): Entropy, Reference Diversity, Citation Diversity, and Innovation per year, 1970-2020.]
FIG. S8. All Novelty Measures per Year.

[Figure panels: Temporal Reference Diversity vs. Years After First SS Pub., by score group (0.00-0.10, 0.10-0.20, 0.20-0.30, 0.30-0.50, 0.50-1.00).]
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='09 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='13 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='14 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='15 Citation Diversity (B) group 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00 0 10 20 30 40 50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='500 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='525 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='550 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='575 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='600 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='625 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='650 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='675 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='700 Similarities (C) group 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1290 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1295 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1300 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1305 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='1310 Aggregate 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00 Inspiration Groups 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='099 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='100 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='101 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='102 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='103 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='104 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='10-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='20-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='30-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='50-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='00 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='54 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='56 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='57 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='58 Concept Diversity and Similarities of Inspired Groups FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' (A) Reference Diversity, (B) Citation Diversity, (C) Within-group paper similarities for the followers of a superstar partitioned by level of inspiration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Upper panel: temporal evolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' Lower panel: averaged in time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content=' S-12 0 5 10 15 20 25 t − t0 (yr) 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='8 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='2 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='4 Yearly Publications Including Superstar Papers Early Collaborators Early Innovators 0 5 10 15 20 25 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NE0T4oBgHgl3EQfewDQ/content/2301.02396v1.pdf'} +page_content='2 0.' 
[FIG. S10. Publication rates of academic groups, LEFT including superstar collaborations and RIGHT excluding superstar collaborations. Panels: "Yearly Publications Including Superstar Papers" / "Excluding Superstar Papers", early collaborators vs. early innovators, plotted against t − t0 (yr) ("Publication Rate per Academic Group"). S-13]
diff --git a/6NE4T4oBgHgl3EQf1g37/content/2301.05292v1.pdf b/6NE4T4oBgHgl3EQf1g37/content/2301.05292v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..52a54b87c05074be477c17a7b17d8dc61e0a6f43
--- /dev/null
+++ b/6NE4T4oBgHgl3EQf1g37/content/2301.05292v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1cee2983f7fd812c2867320eb5e7c069305ce96a456d0d9f0e2951486e1b431
+size 2373256
diff --git a/6NE4T4oBgHgl3EQf1g37/vector_store/index.faiss b/6NE4T4oBgHgl3EQf1g37/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..ccd9eb0c5c55b7259e20842b84242d1ee98fb212
--- /dev/null
+++ b/6NE4T4oBgHgl3EQf1g37/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fa7bf2455072f437c03d335783c83b931e57ce97f93c301ebddd0f6926f2e13
+size 1048621
diff --git a/6NE4T4oBgHgl3EQf1g37/vector_store/index.pkl b/6NE4T4oBgHgl3EQf1g37/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..d03a7c0fcc50f110b26fba2e2cf76bdf4b9a8e86
--- /dev/null
+++ b/6NE4T4oBgHgl3EQf1g37/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76d84708d419c91c8af96023665df90600e1fc8c21c871e08825cbaa81b2e3db
+size 48592
diff --git a/6NFKT4oBgHgl3EQfTS23/content/tmp_files/2301.11779v1.pdf.txt b/6NFKT4oBgHgl3EQfTS23/content/tmp_files/2301.11779v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..de67ec137692350bcc40600befe8fe8b4075d13e
--- /dev/null
+++ b/6NFKT4oBgHgl3EQfTS23/content/tmp_files/2301.11779v1.pdf.txt
@@ -0,0 +1,568 @@
Invariant Meta Learning for Out-of-Distribution Generalization
Penghao Jiang, Ke Xin, Zifeng Wang, Chunxi Li
The Australian National University, Canberra, Australia *
Abstract
Modern deep learning techniques have demonstrated excellent capabilities in many areas, but they rely on large amounts of training data. Optimization-based meta-learning trains a model on a variety of tasks such that it can solve new learning tasks using only a small number of training samples. However, these methods assume that training and test data are identically and independently distributed. To overcome this limitation, in this paper we propose invariant meta learning for out-of-distribution tasks.
Specifically, invariant meta learning finds an invariant optimal meta-initialization and fast-adapts to out-of-distribution tasks with a regularization penalty. Extensive experiments demonstrate the effectiveness of the proposed invariant meta learning on out-of-distribution few-shot tasks.

1. Introduction
Modern deep learning techniques have demonstrated excellent capabilities in many areas such as computer vision, natural language processing and recommendation [11]. However, these methods rely on large amounts of training data. To overcome this limitation, few-shot learning methods such as meta learning have been proposed [6]. The most popular meta learning approaches are optimization-based [4, 16], which are model-agnostic and can be applied to various downstream tasks. However, much recent research has revealed the vulnerability of machine learning models when exposed to data drawn from different distributions.

This gap is induced by the violation of a fundamental assumption: that training and test data are identically and independently distributed (the i.i.d. assumption), upon which most existing meta learning models are developed [4, 16]. In many real cases the i.i.d. assumption can hardly be satisfied, especially in high-stakes applications such as healthcare, the military and autonomous driving; there, rather than generalization within the training distribution, the ability to generalize under distribution shift is of more critical significance. As shown in Figure 1, given training data where dogs are on grass, the model cannot make accurate predictions on testing data where dogs are in water, in a cage or in the street. The reason is that the spurious correlation between grass and dog in the training data hampers the performance of the model. Because of this spurious correlation, the model tends to focus on both the grass and the dog, which leads to failed predictions in other distributions, such as dogs in water, in a cage or in the street, as shown in Figure 2. Existing meta learning methods cannot overcome such distribution shifts between training and testing data. In this paper, we consider a realistic scenario where tasks come from different distributions (out-of-distribution, OOD).

* The first two authors contributed equally as joint first authorship. The last two authors contributed equally as joint second authorship.

Figure 1. Illustration of how distribution shifts between training data and testing data hamper the performance of model predictions (training data show dogs at home, on the beach, eating, in a cage, in water, lying, on grass, in the street and running; the model trained on such data is evaluated on shifted testing data).

Figure 2. Causal framework of the dog prediction task, with T: grass, X: dog nose, Y: label. Grass and label are strongly correlated but only weakly causally related, whereas dog nose and label are both strongly correlated and strongly causally related. Due to the spurious correlation, the model tends to focus on both grass and dog, which leads to failed predictions in other distributions.

To overcome the problem mentioned above, we propose Invariant Meta Learning (IML) for out-of-distribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions. Specifically, invariant meta learning finds an invariant optimal meta-initialization and fast-adapts to out-of-distribution tasks with a regularization penalty.
To summarize, our main contributions are:
• We consider the challenge of out-of-distribution tasks faced by few-shot learning and show a natural way to jointly adjust the gradient magnitudes and directions of all tasks in the meta-optimization process;
• We propose Invariant Meta Learning (IML) for out-of-distribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions;
• We conduct extensive experiments and analysis to demonstrate that our approach effectively improves performance and generalization ability under both in-distribution and out-of-distribution few-shot settings, and thus can be regarded as a better baseline.

2. Method
In this section, we introduce the proposed Invariant Meta Learning (IML) to address the out-of-distribution problem in few-shot tasks. IML learns invariant optimal predictors within an optimization-based meta learning framework. When learning an invariant optimal meta-initialization in optimization-based meta learning, the main challenge is that the OOD problem exacerbates the inconsistency in both task-gradient magnitudes and directions. To overcome this problem, IML finds an invariant optimal initialization and adapts to out-of-distribution tasks with a regularization penalty.

Model-agnostic meta-learning (MAML) [4] is an approach to optimization-based meta-learning that is related to our work. For a parametric model f_\theta, MAML aims to find a single set of parameters \theta which, using a few optimization steps, can be successfully adapted to any novel task sampled from the same distribution. For a particular task instance T_i = (D^{tr}, D^{val}), the parameters are adapted to task-specific model parameters \theta'_i by applying some differentiable function, typically an update rule of the form:
\theta'_i = G(\theta, D^{tr}),   (1)
where G is typically implemented as a step of gradient descent on the few-shot training set D^{tr}, i.e., \theta'_i = \theta - \alpha \nabla_\theta \mathcal{L}^{tr}_{T_i}(f_\theta). Generally, multiple sequential adaptation steps can be applied. The learning rate \alpha can also be meta-learned concurrently, in which case we refer to this algorithm as Meta-SGD [13]. During meta-training, the parameters \theta are updated by back-propagating through the adaptation procedure, in order to reduce errors on the validation set D^{val}:
\theta \leftarrow \theta - \eta \nabla_\theta \sum_{T_i \sim p(T)} \mathcal{L}^{val}_{T_i}(f_{\theta'_i}).   (2)
This approach includes the main ingredients of optimization-based meta-learning with neural networks: initialization is done by maintaining an explicit set of model parameters \theta; the adaptation procedure, or "inner loop", takes \theta as input and returns \theta'_i adapted specifically to task instance T_i by iteratively applying gradient descent (Eq. 1); and termination is handled simply by choosing a fixed number of optimization steps in the inner loop. MAML updates \theta by differentiating through the inner loop in order to minimize the errors of the instance-specific adapted models f_{\theta'_i} on the corresponding validation sets (Eq. 2). We refer to this process as the "outer loop" of meta-learning, and we use the same stages to describe IML.
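To make the bi-level structure of Eqs. (1)-(2) concrete, the following is a minimal sketch of a MAML-style update on a toy sine-regression problem. It is not the authors' code; it assumes PyTorch >= 2.0 (for torch.func.functional_call), and the network size, learning rates and task sampler are illustrative choices.

import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.Sequential(nn.Linear(1, 40), nn.ReLU(), nn.Linear(40, 1))
outer_opt = torch.optim.Adam(model.parameters(), lr=1e-3)
inner_lr, loss_fn = 0.01, nn.MSELoss()

def sample_task(k=10):
    """A toy task T_i = (D_tr, D_val): regress a sine with random amplitude and phase."""
    amp, phase = 1 + 4 * torch.rand(1), 3.14 * torch.rand(1)
    x = 10 * torch.rand(2 * k, 1) - 5
    y = amp * torch.sin(x + phase)
    return (x[:k], y[:k]), (x[k:], y[k:])

for step in range(1000):                                   # outer loop (Eq. 2)
    outer_loss = 0.0
    for _ in range(4):                                      # tasks T_i ~ p(T)
        (x_tr, y_tr), (x_val, y_val) = sample_task()
        # Inner loop (Eq. 1): one gradient step on D_tr; create_graph=True keeps
        # the adaptation differentiable so the outer update can pass through it.
        tr_loss = loss_fn(model(x_tr), y_tr)
        grads = torch.autograd.grad(tr_loss, list(model.parameters()), create_graph=True)
        adapted = {name: p - inner_lr * g
                   for (name, p), g in zip(model.named_parameters(), grads)}
        # Validation loss of the adapted model f_{theta'_i}.
        pred = torch.func.functional_call(model, adapted, (x_val,))
        outer_loss = outer_loss + loss_fn(pred, y_val)
    outer_opt.zero_grad()
    outer_loss.backward()                                   # differentiate through the inner loop
    outer_opt.step()

A single outer step therefore involves second-order gradients; first-order approximations such as Reptile [14] avoid this cost.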
Invariant Meta Learning (IML) finds an invariant optimal meta-initialization and fast-adapts to out-of-distribution tasks with a regularization penalty. MAML fast-adapts the network to a new task during the inner loop and learns a universal meta-initialization in the outer loop. Similarly, in IML we update the network with a bi-level update, optimizing the classifier in the inner loop and learning the feature representation in the outer loop. For the inner-level optimization, the parameters \theta of the predictor become \theta_i while adapting to task t_i \in T^{tr}. This corresponds to the inner optimization of MAML, except that each task t_i has a corresponding network \theta_i. The inner-loop optimization is defined as:
\theta'_i = \theta - \alpha \nabla_\theta \mathcal{L}^{tr}_{T_i}(f_\theta),   (3)
where \alpha is the learning rate of the inner optimization. With the inner-optimized network f_{\theta'_i}, the outer-loop objective with a variance penalty regularizer is:
\mathcal{L}^{val} = \sum_{T_i \sim p(T^{tr})} \sum_{T_j \sim p(T^{val})} \mathcal{L}^{val}_{T_j}(f_{\theta'_i}),   (4)
\theta \leftarrow \theta - \eta \nabla_\theta \mathcal{L}^{val} - \beta \lambda \, \mathrm{trace}\big(\mathrm{Var}_{T^{val}}(\nabla_\theta \mathcal{L}^{val})\big),   (5)
where \eta and \beta are learning rates of the outer-loop optimization, t_j is the j-th task used in the outer loop for the adapted network \theta'_i, and \mathcal{L} is the outer-loop loss function. Note that the inner-optimized network f_{\theta'_i} is used to update the meta-initialization in the outer loop with t_j, whereas it is updated from the meta-initialization with t_i in the inner loop. IML thus learns an invariant meta-initialization from the discrepancy among different training tasks through the variance penalty regularizer.
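The variance penalty in Eq. (5) can be read as regularizing the outer objective by the trace of the covariance of the per-task outer gradients across validation tasks. Since no reference implementation accompanies the text, the following is a minimal sketch of that reading, again assuming PyTorch >= 2.0; the coefficients beta and lam, and the choice to fold the penalty into the loss that is back-propagated, are our assumptions for illustration.

import torch

def iml_outer_step(model, outer_opt, task_val_losses, beta=1.0, lam=0.1):
    """One outer update in the spirit of Eqs. (4)-(5).

    task_val_losses: list of scalar losses L^val_{T_j}(f_{theta'_i}), each still
    connected to the meta-parameters theta through the differentiable inner step
    (for example, produced exactly as in the MAML sketch above).
    """
    theta = list(model.parameters())
    outer_loss = torch.stack(task_val_losses).sum()                    # Eq. (4)
    # Per-task outer gradients w.r.t. theta, kept differentiable so that the
    # variance penalty itself can be reduced by the meta-update.
    per_task_grads = [
        torch.cat([g.reshape(-1) for g in
                   torch.autograd.grad(l, theta, create_graph=True, retain_graph=True)])
        for l in task_val_losses
    ]
    grad_matrix = torch.stack(per_task_grads)                          # (num_tasks, num_params)
    penalty = grad_matrix.var(dim=0).sum()                             # trace(Var(grad))
    outer_opt.zero_grad()
    (outer_loss + beta * lam * penalty).backward()                     # Eq. (5)
    outer_opt.step()

When the validation tasks come from shifted distributions, disagreement among their gradients makes the trace term large, pushing the meta-update toward initializations whose adaptation behaves consistently across task distributions.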
3. Experiments
Datasets. We address the few-shot classification problem under both in-distribution and out-of-distribution few-shot learning (FSL) settings. These settings are conducted on three benchmark datasets: miniImageNet [23], Caltech-UCSD Birds-200-2011 (CUB) [25], and the SUN Attribute Database (SUN) [15].

Method                 miniImageNet              CUB                       SUN
                       5-way 1-shot  5-way 5-shot  5-way 1-shot  5-way 5-shot  5-way 1-shot  5-way 5-shot
Meta-Learner LSTM      24.99         29.79         36.23         44.39         30.99         44.86
MAML                   45.69         60.90         48.87         63.99         57.75         71.45
Reptile                26.59         39.87         27.21         42.35         28.30         51.62
Matching Network       47.63         56.28         53.06         62.19         55.02         62.57
Prototypical Network   46.15         65.56         48.21         57.80         55.70         67.32
Relation Network       47.64         63.65         52.76         64.71         58.29         72.15
Baseline               23.84         32.09         25.14         35.35         27.44         34.54
Baseline++             30.15         41.19         32.48         42.43         35.56         44.42
IML                    48.35         67.21         54.18         65.85         59.24         74.18
Table 1. Average accuracy (%) comparison to the state of the art with 95% confidence intervals on 5-way classification tasks under the in-distribution FSL setting. Best results are displayed in boldface.

Method                 miniImageNet→CUB          miniImageNet→SUN          CUB→miniImageNet
                       5-way 1-shot  5-way 5-shot  5-way 1-shot  5-way 5-shot  5-way 1-shot  5-way 5-shot
Meta-Learner LSTM      23.77         30.58         25.52         32.14         22.58         28.18
MAML                   40.29         53.01         46.07         59.08         33.36         41.58
Reptile                24.66         40.86         32.15         50.38         24.56         40.60
Matching Network       38.34         47.64         39.58         53.20         26.23         32.90
Prototypical Network   36.60         54.36         46.31         66.21         29.22         38.73
Relation Network       39.33         50.64         44.55         61.45         28.64         38.01
Baseline               24.16         32.73         25.49         37.15         22.98         28.41
Baseline++             29.40         40.48         30.44         41.71         23.41         25.82
IML                    41.27         57.34         50.42         69.15         34.26         44.17
Table 2. Average accuracy (%) comparison to the state of the art with 95% confidence intervals on 5-way classification tasks under the out-of-distribution FSL setting. Best results are displayed in boldface.

Baselines. To evaluate the effectiveness of the proposed framework, we consider the following representative meta learning methods on the few-shot image classification task: MAML [5], Reptile [14], Matching Network [23], Prototypical Network [20], Relation Network [21], and Baseline and Baseline++ [3].

Experimental Settings. We conduct experiments under 5-way 1-shot and 5-way 5-shot settings, with 15 query samples per class in each task. We report the average accuracy (%) and the corresponding 95% confidence interval over 2000 tasks randomly sampled from the novel classes. To fairly evaluate the original performance of each method, we use the same 4-layer ConvNet [23] as the backbone for all methods and do not adopt any data augmentation during training. All methods are trained with the Adam optimizer [10], and the initial learning rate is set to e−3. For each method, models are trained on at most 40,000 tasks, and the best model on the validation classes is used to report the final performance in the meta-test phase.

Evaluation Using the In-Distribution Setting. Table 1 shows the comparative results under the in-distribution FSL setting on the three benchmark datasets. IML outperforms the original MAML in all in-distribution FSL scenarios. For 1-shot and 5-shot on miniImageNet → miniImageNet, IML achieves about 1% higher performance than Prototypical Network, while it achieves 5% and 10% higher performance for 1-shot and 5-shot on CUB → CUB, and 3% and 6% higher performance on SUN → SUN. As the latter two scenarios involve fine-grained classification datasets, we attribute this improvement to the fact that categories in fine-grained datasets share more local concepts than those in coarse-grained datasets, so a more discriminative space can be rapidly learned with a few steps of adaptation. Moreover, IML achieves the best performance among all baselines in all in-distribution FSL scenarios, which shows that our approach can be considered a better baseline option under the in-distribution FSL setting.

Evaluation Using the Out-of-Distribution Setting. We also conduct out-of-distribution FSL experiments and report the comparative results in Table 2. Compared to the results under the in-distribution setting, all approaches suffer from the larger discrepancy between the distributions of training and testing tasks, which results in a performance decline in all scenarios. However, IML still outperforms the original MAML in all out-of-distribution FSL scenarios, demonstrating that the bi-level optimization strategy for adaptation and the learning of transferable latent factors improve simple meta learning approaches. IML also achieves all the best results, indicating that our approach can be regarded as a promising baseline under the out-of-distribution setting.

4. Conclusion
In this paper, we consider the challenge of out-of-distribution tasks faced by few-shot learning. We propose Invariant Meta Learning (IML) for out-of-distribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions. Extensive experiments demonstrate that our approach effectively improves performance and generalization ability under both in-distribution and out-of-distribution few-shot settings, and thus it can be regarded as a better baseline.

References
[1] Yoshua Bengio, Samy Bengio, and Jocelyn Cloutier.
Learning a synaptic learning rule. Citeseer, 1990.
[2] Fei Chen, Mi Luo, Zhenhua Dong, Zhenguo Li, and Xiuqiang He. Federated meta-learning with fast convergence and efficient communication. arXiv preprint arXiv:1802.07876, 2018.
[3] Wei-Yu Chen, Yen-Cheng Liu, Zsolt Kira, Yu-Chiang Frank Wang, and Jia-Bin Huang. A closer look at few-shot classification. arXiv preprint arXiv:1904.04232, 2019.
[4] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In International Conference on Machine Learning, pages 1126–1135. PMLR, 2017.
[5] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the 34th International Conference on Machine Learning, pages 1126–1135. PMLR, 2017.
[6] Chelsea Finn and Sergey Levine. Meta-learning and universality: Deep representations and gradient descent can approximate any learning algorithm. arXiv preprint arXiv:1710.11622, 2017.
[7] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. Meta-learning in neural networks: A survey. arXiv preprint arXiv:2004.05439, 2020.
[8] Simon Jenni and Paolo Favaro. Deep bilevel learning. In Proceedings of the European Conference on Computer Vision (ECCV), pages 618–633, 2018.
[9] Taewon Jeong and Heeyoung Kim. OOD-MAML: Meta-learning for few-shot out-of-distribution detection and classification. Advances in Neural Information Processing Systems, 33:3907–3916, 2020.
[10] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
[11] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
[12] Hae Beom Lee, Hayeon Lee, Donghyun Na, Saehoon Kim, Minseop Park, Eunho Yang, and Sung Ju Hwang. Learning to balance: Bayesian meta-learning for imbalanced and out-of-distribution tasks. arXiv preprint arXiv:1905.12917, 2019.
[13] Zhenguo Li, Fengwei Zhou, Fei Chen, and Hang Li. Meta-SGD: Learning to learn quickly for few-shot learning. arXiv preprint arXiv:1707.09835, 2017.
[14] Alex Nichol, Joshua Achiam, and John Schulman. On first-order meta-learning algorithms. arXiv preprint arXiv:1803.02999, 2018.
[15] Genevieve Patterson, Chen Xu, Hang Su, and James Hays. The SUN Attribute Database: Beyond categories for deeper scene understanding. International Journal of Computer Vision, 108(1):59–81, 2014.
[16] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and Sergey Levine. Meta-learning with implicit gradients. Advances in Neural Information Processing Systems, 32, 2019.
[17] Sachin Ravi and Hugo Larochelle. Optimization as a model for few-shot learning. 2016.
[18] Andrei A Rusu, Dushyant Rao, Jakub Sygnowski, Oriol Vinyals, Razvan Pascanu, Simon Osindero, and Raia Hadsell. Meta-learning with latent embedding optimization. arXiv preprint arXiv:1807.05960, 2018.
[19] Amrith Setlur, Oscar Li, and Virginia Smith. Is support set diversity necessary for meta-learning? arXiv preprint arXiv:2011.14048, 2020.
[20] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in Neural Information Processing Systems, 30, 2017.
[21] Flood Sung, Yongxin Yang, Li Zhang, Tao Xiang, Philip HS Torr, and Timothy M Hospedales. Learning to compare: Relation network for few-shot learning.
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1199–1208, 2018.
[22] Sebastian Thrun and Lorien Pratt. Learning to learn. Springer Science & Business Media, 2012.
[23] Oriol Vinyals, Charles Blundell, Timothy Lillicrap, Daan Wierstra, et al. Matching networks for one shot learning. Advances in Neural Information Processing Systems, 29, 2016.
[24] Risto Vuorio, Shao-Hua Sun, Hexiang Hu, and Joseph J Lim. Multimodal model-agnostic meta-learning via task-aware modulation. Advances in Neural Information Processing Systems, 32, 2019.
[25] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 dataset. 2011.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' To overcome this limitation, few-shot learning methods such as meta learning has been proposed [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Most popular meta learning approaches is the optimization-based metalearning [4, 16], which is model-agnostic and can be applied to var- ious downstream tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' However, many recent researches have revealed the vulnerability of machine learning model when exposed to data with different distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Such massive gap is induced by the violation of a funda- mental assumption that training and test data are identically and independently distributed (a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' assumption), upon which most of the existing meta learning models are developed [4, 16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' In many real cases where i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' assump- tion can hardly be satisfied, especially those high-stake ap- plications such as healthcare, military and autonomous driv- ing, instead of generalization within the training distribu- tion, the ability to generalize under distribution shift is of more critical significance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' As shown in Figure 1, given tran- The first two authors contributed equally as joint first authorship.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' The last two authors contributed equally as joint second authorship.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Figure 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Illustration example of how the distribution shifts be- tween training data and testing data hamper the performance of model predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Causal framework of dog perdiction task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Due to the spurious correlation, the model tends to focus on both grass and dog, which lead to failed prediction in other distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' ing data where dogs are on the grass, model could not make accurate predictions in testing data where dogs are in water, cage or street.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' The reason is that the supurious correlation between grass and dog in traning data hamper the perfor- mance of model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Due to the spurious correlation, the model tends to focus on both grass and dog, which lead to failed prediction in other distribution such as dogs are in water, cage or street as shown in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' However, recent meta learning methods could not overcome the distribution shifts between training and testing data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' In this paper, we con- sider a realistic scenario where tasks come from different distributions (out-of-distribution, OOD).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' In this paper, to overcome the problem mentioned above, we propose Invariant Meta Learning (IML) for out-of- dis- arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='11779v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content='LG] 26 Jan 2023 Athome onbeach eating incage inwater lying ongrass instreet running Training data Model Testing dataGrass--Label:Strongcorrelation CausalFramework Weakcausation Dog noseLabel:Strong correlation X Strong causation T: grass X: dog nose Y:labeltribution tasks, a general learning framework that jointly ad- justs gradient magnitudes and directions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Specifically, in- variant meta learning find invariant optimal metainitializa- tion, and fast adapt to out-of-distribution tasks with regular- ization penalty.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' To summarize, our main contributions are: We consider the challenge of out-of-distribution tasks faced by few-shot learning, we show a natural idea to jointly adjust gradient magnitudes and directions of all tasks in the meta optimization process;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' We propose Invariant Meta Learning (IML) for out- ofdistribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' We conduct extensive experiments and analysis to demonstrate that our approach effectively improves the performance and generalization ability under both in- distribution and out-of-distribution few-shot settings, and thus it can be regarded as a better baseline.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Method In this section, we introduce our proposed Invariant Meta Learning (IML) to address the out-of-distribution problem in few-shot tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' IML learns invariant optimal predic- tors based on optimization based meta learning framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' To learn invariant optimal meta-initialization in optimiza- tion based meta learning, the main challenge is that OOD problem exacerbates the inconsistency in both task-gradient magnitudes and directions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' To overcome such problem, IML finds invariant optimal initialization, and adapt to outof- distribution tasks with regularization penalty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Model-agnostic meta-learning (MAML) [4] is an ap- proach to optimization-based meta-learning that is related to our work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' For some parametric model fθ, MAML aims to find a single set of parameters θ which, using a few op- timization steps, can be successfully adapted to any novel task sampled from the same distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' For a particular task instance Ti = � Dtr, Dval� , the parameters are adapted to task-specific model parameters θ′ i by applying some dif- ferentiable function, typically an update rule of the form: θ′ i = G � θ, Dtr� , (1) where G is typically implemented as a step of gradi- ent descent on the few-shot training set Dtr , θ′ i = θ− α∇θLtr Ti (fθ).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Generally, multiple sequential adaptation steps can be applied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' The learning rate α can also be met- alearned concurrently, in which case we refer to this algo- rithm as Meta-SGD [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' During meta-training, the param- eters θ are updated by back-propagating through the adap- tation procedure, in order to reduce errors on the validation set Dval : θ ← θ − η∇θ � Ti∼p(T ) Lval Ti � fθ′ i � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' (2) The approach includes the main ingredients of optimization-based meta-learning with neural networks: initialization is done by maintaining an explicit set of model parameters θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' the adaptation procedure, or “inner loop”, takes θ as input and returns θ′ i adapted specifically for task instance Ti, by iteratively using gradient descent (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' 1);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' and termination, which is handled simply by choosing a fixed number of optimization steps in the “inner loop”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' MAML updates θ by differentiating through the “inner loop” in order to minimize errors of instance-specific adapted models fθ′ i on the corresponding validation set (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' We refer to this process as the “outer loop” of meta-learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' We use the same stages to describe IML.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Invariant Meta Learning (IML) finds invariant opti- mal meta-initialization, and fast adapt to out-of-distribution tasks with regularization penalty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' MAML fast adapt net- work to new task during the inner loop and learns univer- sal meta-initialization in outer loop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'} +page_content=' Similarly, in IML, we update network with the bi-level update, optimizing clas- sifier in the inner loop and learning feature representation in the outer loop.' 
For the inner-level optimization, the parameters $\theta$ of the predictor become $\theta_i$ while adapting to the task $t_i \in \mathcal{T}^{tr}$. This corresponds to the inner optimization of MAML, except that each task $t_i$ has a corresponding network $\theta_i$. The optimization in the inner loop can be defined as follows:

$\theta'_i = \theta - \alpha \nabla_\theta \mathcal{L}^{tr}_{\mathcal{T}_i}(f_\theta),$   (3)

where $\alpha$ is the learning rate of the inner optimization. With the inner-optimized network $f_{\theta'_i}$, we have the outer-loop objective function with a variance penalty regularizer:

$\mathcal{L}^{val} = \sum_{\mathcal{T}_i \sim p(\mathcal{T}^{tr})} \sum_{\mathcal{T}_j \sim p(\mathcal{T}^{val})} \mathcal{L}^{val}_{\mathcal{T}_j}\big(f_{\theta'_i}\big),$   (4)

$\theta \leftarrow \theta - \eta \nabla_\theta \mathcal{L}^{val} - \beta\lambda\,\mathrm{trace}\big(\mathrm{Var}_{\mathcal{T}^{val}}\big(\nabla_\theta \mathcal{L}^{val}\big)\big),$   (5)

where $\eta$ and $\beta$ are the learning rates of the outer-loop optimization, $t_j$ is task $j$ for the outer-loop optimization of the network $\theta'_i$, and $\mathcal{L}$ is the loss function for the outer-loop optimization. Note that the inner-optimized network $f_{\theta'_i}$ is used to update the meta-initialization in the outer loop with $t_j$, whereas it is updated from the meta-initialization with $t_i$ in the inner loop. IML learns an invariant meta-initialization obtained from the discrepancy among different training tasks with the variance penalty regularizer.

3. Experiments

Datasets. In this paper, we address the few-shot classification problem under both in-distribution and out-of-distribution FSL settings. These settings are conducted on three benchmark datasets: miniImageNet [23], Caltech-UCSD Birds-200-2011 (CUB) [25], and SUN Attribute Database (SUN) [15].

Method               | miniImageNet        | CUB                 | SUN
                     | 1-shot   | 5-shot   | 1-shot   | 5-shot   | 1-shot   | 5-shot
Meta-Learner LSTM    | 24.99    | 29.79    | 36.23    | 44.39    | 30.99    | 44.86
MAML                 | 45.69    | 60.90    | 48.87    | 63.99    | 57.75    | 71.45
Reptile              | 26.59    | 39.87    | 27.21    | 42.35    | 28.30    | 51.62
Matching Network     | 47.63    | 56.28    | 53.06    | 62.19    | 55.02    | 62.57
Prototypical Network | 46.15    | 65.56    | 48.21    | 57.80    | 55.70    | 67.32
Relation Network     | 47.64    | 63.65    | 52.76    | 64.71    | 58.29    | 72.15
Baseline             | 23.84    | 32.09    | 25.14    | 35.35    | 27.44    | 34.54
Baseline++           | 30.15    | 41.19    | 32.48    | 42.43    | 35.56    | 44.42
IML                  | 48.35    | 67.21    | 54.18    | 65.85    | 59.24    | 74.18

Table 1. Average accuracy (%) comparison to state-of-the-arts with 95% confidence intervals on 5-way classification tasks under the in-distribution FSL setting. Best results are displayed in boldface.

Method               | miniImageNet→CUB    | miniImageNet→SUN    | CUB→miniImageNet
                     | 1-shot   | 5-shot   | 1-shot   | 5-shot   | 1-shot   | 5-shot
Meta-Learner LSTM    | 23.77    | 30.58    | 25.52    | 32.14    | 22.58    | 28.18
MAML                 | 40.29    | 53.01    | 46.07    | 59.08    | 33.36    | 41.58
Reptile              | 24.66    | 40.86    | 32.15    | 50.38    | 24.56    | 40.60
Matching Network     | 38.34    | 47.64    | 39.58    | 53.20    | 26.23    | 32.90
Prototypical Network | 36.60    | 54.36    | 46.31    | 66.21    | 29.22    | 38.73
Relation Network     | 39.33    | 50.64    | 44.55    | 61.45    | 28.64    | 38.01
Baseline             | 24.16    | 32.73    | 25.49    | 37.15    | 22.98    | 28.41
Baseline++           | 29.40    | 40.48    | 30.44    | 41.71    | 23.41    | 25.82
IML                  | 41.27    | 57.34    | 50.42    | 69.15    | 34.26    | 44.17

Table 2. Average accuracy (%) comparison to state-of-the-arts with 95% confidence intervals on 5-way classification tasks under the out-of-distribution FSL setting. Best results are displayed in boldface.

Baselines. To evaluate the effectiveness of the proposed framework, we consider the following representative meta learning methods on the few-shot image classification task: MAML [5], Reptile [14], Matching Network [23], Prototypical Network [20], Relation Network [21], Baseline and Baseline++ [3].

Experimental Settings. We conduct experiments on 5-way 1-shot and 5-way 5-shot settings; there are 15 query samples per class in each task. We report the average accuracy (%) and the corresponding 95% confidence interval over the 2000 tasks randomly sampled from novel classes.
To fairly evaluate the original performance of each method, we use the same 4-layer ConvNet [23] as the backbone for all methods and do not adopt any data augmentation during training. All methods are trained via SGD with Adam [10], and the initial learning rate is set to 1e-3. For each method, models are trained for 40,000 tasks at most, and the best model on the validation classes is used to evaluate the final reported performance in the meta-test phase.

Evaluation Using the In-Distribution Setting. Table 1 shows the comparative results under the in-distribution FSL setting on the three benchmark datasets. It is observed that IML outperforms the original MAML in all in-distribution FSL scenarios. For 1-shot and 5-shot on miniImageNet → miniImageNet, IML achieves about 1% higher performance than Prototypical Network. However, IML achieves 5% and 10% higher performance for 1-shot and 5-shot on CUB → CUB, and 3% and 6% higher performance on SUN → SUN. As the latter two scenarios are conducted on fine-grained classification datasets, we attribute the promising improvement to the fact that the categories in these fine-grained datasets share more local concepts than those in coarse-grained datasets, and thus a more discriminative space can be rapidly learned with a few steps of adaptation. Moreover, IML achieves the best performance among all baselines in all in-distribution FSL scenarios, which shows that our approach can be considered as a better baseline option under the in-distribution FSL setting.

Evaluation Using the Out-of-Distribution Setting. We also conduct out-of-distribution FSL experiments and report the comparative results in Table 2. Compared to the results under the in-distribution setting, it can be observed that all approaches suffer from a larger discrepancy between the distributions of training and testing tasks, which results in a performance decline in all scenarios. However, IML still outperforms the original MAML in all out-of-distribution FSL scenarios, demonstrating that the bi-level optimization strategy for adaptation and the learning of transferable latent factors can be utilized to improve simple meta learning approaches. Also, IML achieves all the best results, indicating that our approach can be regarded as a promising baseline under the out-of-distribution setting.

4. Conclusion

In this paper, we consider the challenge of out-of-distribution tasks faced by few-shot learning. We propose Invariant Meta Learning (IML) for out-of-distribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions. Extensive experiments demonstrate that our approach effectively improves the performance and generalization ability under both in-distribution and out-of-distribution few-shot settings, and thus it can be regarded as a better baseline.

References

[1] Yoshua Bengio, Samy Bengio, and Jocelyn Cloutier. Learning a synaptic learning rule. Citeseer, 1990.
[2] Fei Chen, Mi Luo, Zhenhua Dong, Zhenguo Li, and Xiuqiang He. Federated meta-learning with fast convergence and efficient communication. arXiv preprint arXiv:1802.07876, 2018.
[3] Wei-Yu Chen, Yen-Cheng Liu, Zsolt Kira, Yu-Chiang Frank Wang, and Jia-Bin Huang. A closer look at few-shot classification. arXiv preprint arXiv:1904.04232, 2019.
[4] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In International Conference on Machine Learning, pages 1126-1135. PMLR, 2017.
[5] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the 34th International Conference on Machine Learning, pages 1126-1135. PMLR, 2017.
[6] Chelsea Finn and Sergey Levine. Meta-learning and universality: Deep representations and gradient descent can approximate any learning algorithm. arXiv preprint arXiv:1710.11622, 2017.
[7] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. Meta-learning in neural networks: A survey. arXiv preprint arXiv:2004.05439, 2020.
[8] Simon Jenni and Paolo Favaro. Deep bilevel learning. In Proceedings of the European Conference on Computer Vision (ECCV), pages 618-633, 2018.
[9] Taewon Jeong and Heeyoung Kim. OOD-MAML: Meta-learning for few-shot out-of-distribution detection and classification. Advances in Neural Information Processing Systems, 33:3907-3916, 2020.
[10] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
[11] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
[12] Hae Beom Lee, Hayeon Lee, Donghyun Na, Saehoon Kim, Minseop Park, Eunho Yang, and Sung Ju Hwang. Learning to balance: Bayesian meta-learning for imbalanced and out-of-distribution tasks. arXiv preprint arXiv:1905.12917, 2019.
[13] Zhenguo Li, Fengwei Zhou, Fei Chen, and Hang Li. Meta-SGD: Learning to learn quickly for few-shot learning. arXiv preprint arXiv:1707.09835, 2017.
[14] Alex Nichol, Joshua Achiam, and John Schulman. On first-order meta-learning algorithms. arXiv preprint arXiv:1803.02999, 2018.
[15] Genevieve Patterson, Chen Xu, Hang Su, and James Hays. The SUN attribute database: Beyond categories for deeper scene understanding. International Journal of Computer Vision, 108(1):59-81, 2014.
[16] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and Sergey Levine. Meta-learning with implicit gradients. Advances in Neural Information Processing Systems, 32, 2019.
[17] Sachin Ravi and Hugo Larochelle. Optimization as a model for few-shot learning. 2016.
[18] Andrei A Rusu, Dushyant Rao, Jakub Sygnowski, Oriol Vinyals, Razvan Pascanu, Simon Osindero, and Raia Hadsell. Meta-learning with latent embedding optimization. arXiv preprint arXiv:1807.05960, 2018.
[19] Amrith Setlur, Oscar Li, and Virginia Smith. Is support set diversity necessary for meta-learning? arXiv preprint arXiv:2011.14048, 2020.
[20] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in Neural Information Processing Systems, 30, 2017.
[21] Flood Sung, Yongxin Yang, Li Zhang, Tao Xiang, Philip HS Torr, and Timothy M Hospedales. Learning to compare: Relation network for few-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1199-1208, 2018.
[22] Sebastian Thrun and Lorien Pratt. Learning to learn. Springer Science & Business Media, 2012.
[23] Oriol Vinyals, Charles Blundell, Timothy Lillicrap, Daan Wierstra, et al. Matching networks for one shot learning. Advances in Neural Information Processing Systems, 29, 2016.
[24] Risto Vuorio, Shao-Hua Sun, Hexiang Hu, and Joseph J Lim. Multimodal model-agnostic meta-learning via task-aware modulation.
Advances in Neural Information Processing Systems, 32, 2019.
[25] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 dataset. 2011.
diff --git a/7NE2T4oBgHgl3EQfPQZw/vector_store/index.pkl b/7NE2T4oBgHgl3EQfPQZw/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..4cec0ff9e1bc92cb074b7d122a492039d9a3c096
--- /dev/null
+++ b/7NE2T4oBgHgl3EQfPQZw/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6686ae2509b51d0f7e491138107b3bf9830e28fe0329d08bf85e63195c4e4234
+size 232162
diff --git a/89E0T4oBgHgl3EQfwgGc/content/2301.02634v1.pdf b/89E0T4oBgHgl3EQfwgGc/content/2301.02634v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..aaa09fa71005c58246a38726ca9b78fc110e2ecf
--- /dev/null
+++ b/89E0T4oBgHgl3EQfwgGc/content/2301.02634v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec76062bdd94914f1b63f1a6c25edf2029673c24f6e641ee2943c96661e20f49
+size 238676
diff --git a/89E0T4oBgHgl3EQfwgGc/vector_store/index.faiss b/89E0T4oBgHgl3EQfwgGc/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..72d109f77e3a5fb0102b2cb6779d573c43fb46f1
--- /dev/null
+++ b/89E0T4oBgHgl3EQfwgGc/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:295dae4f778029a89ff3992a1d5ebc746d383d6e53e1abf3ce95c418083b79f2
+size 1966125
diff --git a/89E0T4oBgHgl3EQfwgGc/vector_store/index.pkl b/89E0T4oBgHgl3EQfwgGc/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..3f417bfaced281f0b77979a21cfa48b40eaf3d62
--- /dev/null
+++ b/89E0T4oBgHgl3EQfwgGc/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ee97a0bb5cc35a3100c16769c7def0879699feb79cd3a9ad933353e34bfc4fb
+size 86167
diff --git a/8dAyT4oBgHgl3EQfp_jS/content/tmp_files/2301.00536v1.pdf.txt b/8dAyT4oBgHgl3EQfp_jS/content/tmp_files/2301.00536v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2afcf749b5d65abc631a072573e2323c85871806
--- /dev/null
+++ b/8dAyT4oBgHgl3EQfp_jS/content/tmp_files/2301.00536v1.pdf.txt
@@ -0,0 +1,5015 @@
arXiv:2301.00536v1 [math.PR] 2 Jan 2023

Lp-SOLVABILITY AND HÖLDER REGULARITY FOR STOCHASTIC TIME FRACTIONAL BURGERS' EQUATIONS DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE

BEOMSEOK HAN

Abstract. We present the $L_p$-solvability for stochastic time fractional Burgers' equations driven by multiplicative space-time white noise:

$\partial_t^\alpha u = a^{ij}u_{x^ix^j} + b^i u_{x^i} + cu + \bar b^i u u_{x^i} + \partial_t^\beta \int_0^t \sigma(u)\,dW_t, \quad t > 0; \qquad u(0,\cdot) = u_0,$

where $\alpha \in (0,1)$, $\beta < 3\alpha/4 + 1/2$, and $d < 4 - 2(2\beta-1)_+/\alpha$.
The operators $\partial_t^\alpha$ and $\partial_t^\beta$ are the Caputo fractional derivatives of order $\alpha$ and $\beta$, respectively. The process $W_t$ is an $L_2(\mathbb{R}^d)$-valued cylindrical Wiener process, and the coefficients $a^{ij}$, $b^i$, $c$ and $\sigma(u)$ are random.

In addition to the existence and uniqueness of a solution, we also suggest the Hölder regularity of the solution. For example, for any constant $T < \infty$, small $\varepsilon, \delta > 0$, and almost every $\omega \in \Omega$, we have

$\sup_{x \in \mathbb{R}^d} |u(\omega, \cdot, x)|_{C^{\{\frac{\alpha}{2}((2 - (2\beta-1)_+/\alpha - d/2) \wedge 1) + \frac{(2\beta-1)_-}{2}\} \wedge 1 - \varepsilon}([\delta, T])} < \infty$

and

$\sup_{t \le T} |u(\omega, t, \cdot)|_{C^{(2 - (2\beta-1)_+/\alpha - d/2) \wedge 1 - \varepsilon}(\mathbb{R}^d)} < \infty.$

Moreover, $\delta$ can be $0$ if the initial data $u_0 = 0$. Additionally, the Hölder regularity of the solution in time changes behavior at $\beta = 1/2$. Furthermore, if $\beta \ge 1/2$, then the Hölder regularity of the solution in time is $\alpha/2$ times the one in space.

1. Introduction

This article investigates the existence, uniqueness, $L_p$-regularity, and maximal Hölder regularity of a solution to stochastic time fractional Burgers' equations (STFBEs) driven by space-time white noise. We consider

$\partial_t^\alpha u = Lu + \bar b^i u u_{x^i} + \partial_t^\beta \int_0^t \sigma(u)\, dW_t, \quad (\omega, t, x) \in \Omega \times (0,\infty) \times \mathbb{R}^d; \qquad u(0,\cdot) = u_0, \qquad (1.1)$

where $\alpha \in (0,1)$, $\beta < \frac{3}{4}\alpha + \frac{1}{2}$, and $d < 4 - \frac{2(2\beta-1)_+}{\alpha}$. The operators $\partial_t^\alpha$ and $\partial_t^\beta$ are the Caputo fractional derivatives of order $\alpha$ and $\beta$, and the operator $L$ is the second order random differential operator defined as follows:

$(Lu)(\omega,t,x) = a^{ij}(\omega,t,x)u_{x^ix^j} + b^i(\omega,t,x)u_{x^i} + c(\omega,t,x)u.$

2020 Mathematics Subject Classification: 35R11, 26A33, 60H15, 35R60.
Key words and phrases: Stochastic partial differential equation, Time fractional derivative, Stochastic Burgers' equation, Time fractional Burgers' equation, Space-time white noise, Hölder regularity.
This work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No. NRF-2021R1C1C2007792) and the BK21 Fostering Outstanding Universities for Research (FOUR) funded by the Ministry of Education (MOE, Korea) and the National Research Foundation of Korea (NRF).

The random coefficients $a^{ij}$, $b^i$, and $c$ are predictable, differentiable (or continuous), and bounded functions. The diffusion coefficient $\sigma(u) = \sigma(\omega,t,x,u)$ is a predictable and measurable function satisfying growth conditions and Lipschitz continuity in $u$. The detailed conditions on $a^{ij}$, $b^i$, $c$, and $\sigma$ are described in Assumptions 3.1 and 3.3. The random measure $dW_t$ is induced from an $L_2(\mathbb{R}^d)$-valued cylindrical Wiener process $W_t$.

When $\alpha = \beta = 1$ in equation (1.1), the equation is said to be a stochastic Burgers' equation (SBE) of the form

$\partial_t u = Lu + \bar b u u_x + \sigma(u)\dot W, \quad (\omega,t,x) \in \Omega \times (0,\infty) \times \mathbb{R}; \qquad u(0,\cdot) = u_0, \qquad (1.2)$

where $\dot W$ is the space-time white noise. Numerous studies have been conducted on equation (1.2), but we only refer the reader to [13, 14, 29]. In [13], the author proved the uniqueness, existence, and continuity of a solution to a semilinear equation, including an equation of type (1.2), on the unit interval $(0,1)$. Additionally, the same properties of a solution on $\mathbb{R}$ were obtained in [14] when $L_2$ boundedness conditions on $\sigma(u)$ were imposed. In [29], the authors investigated the Hölder regularity and moment estimates of the random field solution to (1.2) with $L = \Delta$ and $\bar b = -1$.
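To give a concrete picture of equation (1.2), the following is a minimal illustrative sketch (not taken from this paper or from any of the cited works) of an explicit finite-difference Euler-Maruyama discretization of the SBE with $L = \Delta$, $\bar b = -1$, and the illustrative choice $\sigma(u) = u$, on a periodic grid in $d = 1$. The grid sizes, time step, initial data, and noise discretization are assumptions made purely for illustration, and no claim is made about convergence of this scheme.

```python
# Illustrative explicit scheme for the stochastic Burgers' equation (1.2) with
# L = Laplacian, b-bar = -1, sigma(u) = u (an assumed multiplicative noise):
#     du = (u_xx - u u_x) dt + u dW(t, x),  periodic on [0, 1].
# Space-time white noise is approximated on the grid by independent Gaussians
# scaled by sqrt(dt / dx).
import numpy as np

rng = np.random.default_rng(1)
nx, dx = 256, 1.0 / 256
dt, nt = 1e-6, 20000                     # small dt keeps the explicit scheme stable (dt/dx^2 < 1/2)
x = np.arange(nx) * dx
u = np.sin(2 * np.pi * x)                # initial data u_0

for _ in range(nt):
    u_xx = (np.roll(u, -1) - 2 * u + np.roll(u, 1)) / dx**2   # centered Laplacian
    u_x = (np.roll(u, -1) - np.roll(u, 1)) / (2 * dx)         # centered first derivative
    dW = rng.normal(size=nx) * np.sqrt(dt / dx)               # discretized space-time white noise
    u = u + (u_xx - u * u_x) * dt + u * dW                    # Euler-Maruyama step

print("max |u(T, x)| on the grid:", np.abs(u).max())
```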
In contrast, (deterministic) partial differential equations with Caputo fractional derivatives have been used in many fields, such as electrochemical processes [5, 19], dielectric polarization [33], viscoelastic materials [32], biology [31], and physics [11, 18]. In particular, equation (1.1) with $\alpha \in (0,1)$ and $\sigma(u) = 0$ is called a time fractional Burgers' equation (TFBE), which describes the propagation of waves through viscous media ([1, 2]). Indeed, various studies have been conducted on numerical analysis for the TFBE (see [3, 9, 10, 20, 30]). From a mathematical standpoint, it is reasonable to wonder whether it is possible to demonstrate the uniqueness and existence of a solution to STFBE (1.1), and also to obtain the Hölder regularity of the solution. To the best of our knowledge, [36] is the only study that answers this question. The authors of [36] demonstrate the existence, uniqueness, and regularity of the mild solution to SBEs with fractional derivatives in time and space on a bounded domain $D \subset \mathbb{R}^d$.

In this paper, we provide the $L_p$ uniqueness, existence, and regularity of a strong solution to equation (1.1) with a random second order differential operator $L$ on the whole spatial domain $\mathbb{R}^d$. Additionally, we achieve the Hölder regularity of the solution in time and space. In detail, if $u(\omega,t,x)$ denotes the solution to equation (1.1), then for any bounded stopping time $\tau \le T$ and small constants $\varepsilon, \delta > 0$, almost surely,

$\sup_{x\in\mathbb{R}^d} |u(\omega,\cdot,x)|_{C^{\{\frac{\alpha}{2}((2-(2\beta-1)_+/\alpha-d/2)\wedge 1)+\frac{(2\beta-1)_-}{2}\}\wedge 1-\varepsilon}([\delta,\tau])} < \infty, \qquad \sup_{t\le\tau} |u(\omega,t,\cdot)|_{C^{(2-(2\beta-1)_+/\alpha-d/2)\wedge 1-\varepsilon}(\mathbb{R}^d)} < \infty, \qquad (1.3)$

where $a_+ = (|a|+a)/2$, $a_- = (|a|-a)/2$, and $C^\gamma(D)$ denotes the Hölder spaces. Observe that the behavior of the Hölder regularity of the solution in time changes at $\beta = 1/2$. For example, if $\beta \ge 1/2$, then the Hölder regularity of the solution in time is $\alpha/2$ times that of the regularity in space. Additionally, we can recover the Hölder regularity results of SBEs by letting $\alpha, \beta \uparrow 1$. These results are consistent with the well-known results for stochastic heat equations driven by space-time white noise (e.g. [26, Remark 8.7] or [16, Corollary 3.1]). In contrast, if $\beta < 1/2$, the Hölder regularity in time gains additional regularity by as much as $1/2 - \beta$ (Remark 3.12). Finally, $\delta = 0$ is allowed if the initial data $u_0$ is $0$ (Remark 3.6).
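As a quick sanity check on the exponents in (1.3), the short script below (an illustrative aside, not part of the paper) evaluates the temporal and spatial Hölder exponents as functions of $\alpha$, $\beta$, and $d$, and confirms that letting $\alpha, \beta \uparrow 1$ with $d = 1$ recovers the classical $1/4 - \varepsilon$ (in time) and $1/2 - \varepsilon$ (in space) regularity of the stochastic heat equation driven by space-time white noise; the sample parameter values in the second call are arbitrary.

```python
# Evaluate the Holder exponents appearing in (1.3) (up to the arbitrarily small eps).
def holder_exponents(alpha, beta, d):
    pos = max(2 * beta - 1, 0.0)                   # (2*beta - 1)_+
    neg = max(1 - 2 * beta, 0.0)                   # (2*beta - 1)_-
    space = min(2 - pos / alpha - d / 2, 1.0)      # spatial exponent, before subtracting eps
    time = min(alpha / 2 * space + neg / 2, 1.0)   # temporal exponent, before subtracting eps
    return time, space

# alpha = beta = 1, d = 1: recovers the stochastic heat/Burgers case (1/4, 1/2).
print(holder_exponents(1.0, 1.0, 1))               # -> (0.25, 0.5)

# A genuinely time-fractional example with beta < 1/2 (sample parameters only).
print(holder_exponents(0.8, 0.4, 1))               # -> (0.5, 1.0)
```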
In contrast, +to control the nonlinear-dominating part (Lemma 4.8), we employ an inequality similar to +the chain rule (Lemma 4.6) and a version of the Gr¨onwall inequality including the Caputo +fractional derivatives (Theorem 4.7). +To obtain the maximal H¨older regularity of the solution to equation (1.1), we require two +components: the H¨older embedding theorem for the solution space Hγ +p(τ) (Theorem 2.16) +and the uniqueness of the solution in p (Theorem 3.10). Indeed, when the Lp existence and +uniqueness of a solution are given, we have the H¨older regularity of the solution in each +(large) p > 2 by employing the H¨older embedding theorem for the solution space (Theorem +2.16 and Theorem 3.5). The H¨older regularity of the solution becomes larger as a large p +is chosen; thus, we have to select p that is as large as possible. Therefore, we require the +uniqueness of solutions in p because p varies. +This article is organized as follows. Section 2 introduces the definitions and properties +of space-time white noise, fractional calculus, and stochastic Banach spaces. Additionally, +we present the H¨older embedding theorem for the solution space Hγ +p(τ). Section 3 states +the main results of this article and suggests some remarks. The proof of the main results +is presented in Section 4. Next, Section 5 proves the H¨older embedding theorem for the +solution space Hγ +p(τ). +We finish this section with an introduction to the notation used in this paper. The sets +N and R are sets of natural and real numbers, respectively. +The set Rd denotes the d- +dimensional Euclidean space of points x = (x1, . . . , xd) for xi ∈ R. Throughout this paper, +we assume Einstein’s summation convention on i, j, k ∈ N. We use := to denote a definition. +For a real-valued function f, we set the following: +f+ := |f| + f +2 +and +f− := |f| − f +2 +. +For a normed space F, a measure space (X, M, µ), and p ∈ [1, ∞), a space Lp(X, M, µ; F) +is a set of F-valued Mµ-measurable functions such that +∥u∥Lp(X,M,µ;F ) := +�ˆ +X +∥u(x)∥p +F µ(dx) +�1/p +< ∞. +A set Mµ is the completion of M with respect to the measure µ. +For γ ∈ (0, 1] and +k = 0, 1, 2, . . . , a set Ck+γ(Rd) is the set of R-valued continuous functions u = u(x) such +that +|u|Cγ+k(Rd) := +sup +x∈Rd,|β|=k +���Dβu(x) +��� + +sup +x,y∈Rd,x̸=y +|β|=k +��Dβu(x) − Dβu(y) +�� +|x − y|γ +< ∞, + +4 +BEOMSEOK HAN +where β is a multi-index. Similarly, for γ ∈ (0, 1] and 0 ≤ δ < T < ∞, the set Cγ([δ, T]; F) +is the set of F-valued continuous functions u such that +|u|Cγ([δ,T];F ) := sup +t∈[δ,T] +|u(t)|F + +sup +t,s∈[δ,T], +s̸=t +|u(t) − u(s)|F +|t − s|γ +< ∞. +For a, b ∈ R, we set a∧b := min{a, b} and a∨b := max{a, b}. Let S = S(Rd) denote the set +of Schwartz functions on Rd. Let N = N(a1, a2, ..., ak) be a generic constant if N depends +only on a1, a2, ..., ak. The constant N can vary line by line. For functions depending on ω, t, +and x, the argument ω ∈ Ω is omitted. Finally, for x ∈ Rd, ¯xi := (x1, . . . , xi−1, xi+1, . . . , xd). +2. Preliminaries +In this section, we introduce the definitions and properties of space-time white noise, +fractional calculus, and stochastic Banach spaces. +Throughout this paper, (Ω, F, P) is +a complete probability space equipped with a filtration {Ft}t≥0. +Let {Ft}t≥0 denote a +filtration satisfying the usual conditions. Let P be the predictable σ-field related to {Ft}t≥0. +First, we present the space-time white noise ˙W to understand the stochastic part of (1.1). +Definition 2.1 (Space-time white noise). 
A generalized random field $\dot W$ is said to be the space-time white noise if it is a centered Gaussian random field such that its covariance is given by

$\mathbb{E}\,\dot W(h)\dot W(g) = \int_0^\infty \int_{\mathbb{R}^d} h(t,x)g(t,x)\,dx\,dt, \qquad \forall h, g \in L_2((0,\infty)\times\mathbb{R}^d).$

Remark 2.2. We employ a series of Itô stochastic integrals to interpret the stochastic part of equation (1.1). More precisely, let $\{\eta^k : k \in \mathbb{N}\}$ be an orthonormal basis of $L_2(\mathbb{R}^d)$. If we define

$w_t^k := \int_0^t \int_{\mathbb{R}^d} \eta^k(x)\,\dot W(ds,dx)$

using the Walsh integral (see [35]), then $\{w_t^k : k \in \mathbb{N}\}$ is a set of one dimensional independent Wiener processes. Then, if we set (see [26, Section 8.3] and [23, Section 7])

$W_t := \sum_{k=1}^\infty \eta^k w_t^k,$

then $W_t$ is an $L_2(\mathbb{R}^d)$-valued cylindrical Wiener process and $dW_t = \sum_k \eta^k\,dw_t^k$. Thus, equation (1.1) can be rewritten as

$\partial_t^\alpha u = Lu + \bar b^i u u_{x^i} + \partial_t^\beta \int_0^t \sigma(u)\eta^k\,dw_t^k, \quad (\omega,t,x) \in \Omega\times(0,\infty)\times\mathbb{R}^d; \qquad u(0,\cdot) = u_0.$

Next, we review facts from fractional calculus. For more information, we refer the reader to [6, 17, 21, 32].

Definition 2.3. Let $\alpha > 0$. For $\phi \in L_1((0,T))$, the Riemann-Liouville fractional integral of order $\alpha$ is defined as follows:

$I_t^\alpha \phi(t) := (I_t^\alpha\phi)(t) := \frac{1}{\Gamma(\alpha)}\int_0^t (t-s)^{\alpha-1}\phi(s)\,ds \quad \text{for all } t \in (0,T),$

where $\Gamma(\alpha) := \int_0^\infty t^{\alpha-1}e^{-t}\,dt$.

Remark 2.4. For any $q \in [1,\infty]$, by Jensen's inequality,

$\|I^\alpha\phi\|_{L_q((0,T))} \le N(\alpha,q,T)\|\phi\|_{L_q((0,T))}. \qquad (2.1)$

Therefore, $I_t^\alpha\phi(t)$ is well-defined and finite for almost all $t \le T$. Additionally, Fubini's theorem implies that, for $\alpha, \beta \ge 0$, we have $I^{\alpha+\beta}\phi(t) = I^\alpha I^\beta \phi(t)$.

Definition 2.5. For $\alpha > 0$, let $n \in \mathbb{N}$ be a nonnegative integer such that $n-1 \le \alpha < n$. Suppose $\phi(t)$ is a real-valued function on $[0,T]$ such that $\phi$ is $(n-1)$-times differentiable and $(\frac{d}{dt})^{n-1}\phi$ is absolutely continuous on $[0,T]$.

(i) The Riemann-Liouville fractional derivative $D_t^\alpha\phi$ is defined as

$D_t^\alpha\phi(t) := \frac{1}{\Gamma(n-\alpha)}\frac{d^n}{dt^n}\int_0^t (t-s)^{n-\alpha-1}\phi(s)\,ds.$

(ii) The Caputo fractional derivative $\partial_t^\alpha\phi$ is defined as

$\partial_t^\alpha\phi := \frac{1}{\Gamma(n-\alpha)}\int_0^t (t-s)^{n-\alpha-1}\phi^{(n)}(s)\,ds := \frac{1}{\Gamma(n-\alpha)}\frac{d}{dt}\int_0^t (t-s)^{n-\alpha-1}\big(\phi^{(n-1)}(s) - \phi^{(n-1)}(0)\big)\,ds.$

Remark 2.6.
(i) For any $\alpha, \beta \ge 0$, $D_t^\alpha D_t^\beta\phi = D_t^{\alpha+\beta}\phi$ and

$D_t^\alpha I_t^\beta\phi = D_t^{\alpha-\beta}\phi\,1_{\alpha>\beta} + I_t^{\beta-\alpha}\phi\,1_{\alpha\le\beta}.$

Additionally, if $\alpha \in (0,1)$, $I_t^{1-\alpha}\phi$ is absolutely continuous, and $I_t^{1-\alpha}\phi(0) = 0$, then the following equality holds: $I_t^\alpha D_t^\alpha\phi(t) = \phi(t)$.
(ii) By the definition of the fractional derivatives, if $\phi(0) = \phi^{(1)}(0) = \cdots = \phi^{(n-1)}(0) = 0$, then $D_t^\alpha\phi = \partial_t^\alpha\phi$.

Below we recall the definitions and properties of stochastic Banach spaces (for more detail, see [12, 25, 26, 27]). The solution space $\mathcal{H}_p^\gamma(T)$ and embedding theorems for $\mathcal{H}_p^\gamma(T)$ are suggested.

Definition 2.7. Let $p > 1$ and $\gamma \in \mathbb{R}$. The space $H_p^\gamma = H_p^\gamma(\mathbb{R}^d)$ is the set of all tempered distributions $u$ on $\mathbb{R}^d$ such that

$\|u\|_{H_p^\gamma} := \|(1-\Delta)^{\gamma/2}u\|_{L_p} = \|\mathcal{F}^{-1}[(1+|\xi|^2)^{\gamma/2}\mathcal{F}(u)(\xi)]\|_{L_p} < \infty.$

Similarly, $H_p^\gamma(l_2) = H_p^\gamma(\mathbb{R}^d; l_2)$ is the space of $l_2$-valued functions $g = (g^1, g^2, \cdots)$ such that

$\|g\|_{H_p^\gamma(l_2)} := \big\|\,|(1-\Delta)^{\gamma/2}g|_{l_2}\big\|_{L_p} = \big\|\,|\mathcal{F}^{-1}[(1+|\xi|^2)^{\gamma/2}\mathcal{F}(g)(\xi)]|_{l_2}\big\|_{L_p} < \infty.$

Remark 2.8. Let $d \in \mathbb{N}$ and $\gamma \in (0,\infty)$.
A nonnegative smooth function Rγ(x) exists on +Rd such that, for u ∈ C∞ +c (Rd), +� +(1 − ∆)−γ/2 u +� +(x) = +ˆ +Rd Rγ(y)u(x − y)dy +and +|Rγ(x)| ≤ NAγ,d(x)1|x|≤2 + Ne−|x|/21|x|≥2, + +6 +BEOMSEOK HAN +where N = N(γ, d) is a positive constant and +Aγ,d(x) = + + + + + +|x|γ−d + 1 + O(|x|γ−d+2) +for +0 < γ < d, +log(2/|x|) + 1 + O(|x|2) +for +γ = d, +1 + O(|x|γ−d) +for +γ > d. +For more detail, see [12, Proposition 1.2.5]. +We introduce the space of point-wise multipliers in Hγ +p . +Definition 2.9. Fix γ ∈ R and α ∈ [0, 1) such that α = 0 if γ ∈ Z and α > 0 if |γ| + α is +not an integer. Define +B|γ|+α = + + + + + +B(R) +if γ = 0, +C|γ|−1,1(R) +if γ is a nonzero integer, +C|γ|+α(R) +otherwise, +B|γ|+α(ℓ2) = + + + + + +B(R, ℓ2) +if γ = 0, +C|γ|−1,1(R, ℓ2) +if γ is a nonzero integer, +C|γ|+α(R, ℓ2) +otherwise, +where B(R) is the space of bounded Borel functions on R, C|γ|−1,1(R) represents the space +of |γ| − 1 times continuous differentiable functions whose derivatives of the (|γ| − 1)th +order derivative are Lipschitz continuous, and C|γ|+α is the real-valued H¨older spaces. The +space B(ℓ2) denotes a function space with ℓ2-valued functions instead of real-valued function +spaces. +Below we collect the properties of Bessel potential spaces. +Lemma 2.10. Let γ ∈ R and p > 1. +(i) The space C∞ +c (Rd) is dense in Hγ +p . +(ii) Let γ − d/p = n + ν for some n = 0, 1, · · · and ν ∈ (0, 1]. +Then, for any k ∈ +{0, 1, · · · , n}, we have +|Dku|C(Rd) + |Dnu|Cν(Rd) ≤ N∥u∥Hγ +p , +(2.2) +where Cν(Rd) is the Zygmund space. +(iii) The operator Di : Hγ +p → Hγ+1 +p +is bounded. Moreover, for any u ∈ Hγ+1 +p +, +��Diu +�� +Hγ +p ≤ N∥u∥Hγ+1 +p +, +where N = N(γ, p). +(iv) For γ1, γ2 ∈ R, and u ∈ Hγ1+γ2 +p +, we have +∥∆γ1/2u∥Hγ2 +p +≤ N∥u∥Hγ1+γ2 +p +, +where N = N(γ1, γ2) +(v) For γ ∈ (0, 1), and u ∈ Hγ +p , we have +∥(1 − ∆γ)u∥Lp ≤ N +� +∥u∥Lp + ∥(−∆)γu∥Lp +� +, +where N = N(γ, p) +(vi) For any µ, γ ∈ R, the operator (1 − ∆)µ/2 : Hγ +p → Hγ−µ +p +is an isometry. + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +7 +(vii) Let +ε ∈ [0, 1], +pi ∈ (1, ∞), +γi ∈ R, +i = 0, 1, +γ = εγ1 + (1 − ε)γ0, +1/p = ε/p1 + (1 − ε)/p0. +Then, we have +∥u∥Hγ +p ≤ ∥u∥ε +Hγ1 +p1 ∥u∥1−ε +Hγ0 +p0 . +(viii) Let u ∈ Hγ +p . Then, we have +∥au∥Hγ +p ≤ N∥a∥B|γ|+α∥u∥Hγ +p +and +∥bu∥Hγ +p (ℓ2) ≤ N∥b∥B|γ|+α(ℓ2)∥u∥Hγ +p , +where N = N(γ, p) and B|γ|+α, B|γ|+α(ℓ2) are introduced in Definition 2.9. +Proof. The above results are well known. For (i), (iii), (vi), and (vii), see Theorems 13.3.7 +(i), 13.8.1, 13.3.7 (ii), and Exercise 13.3.20 of [27], respectively. In the case of (ii) and (iv), +see [34]. For (v), see Theorems 1.3.6 and 1.3.8 of [12]. For (viii), we refer the reader to [26, +Lemma 5.2]. +□ +Definition 2.11 (Stochastic Banach spaces). Let τ ≤ T be a bounded stopping time, p ≥ 2, +and γ ∈ R. Set |(0, τ]] := {(ω, t) : 0 < t ≤ τ(ω)} and define +Hγ +p(τ) := Lp +� +|(0, τ]], P, dP × dt; Hγ +p +� +, +Hγ +p(τ, l2) := Lp +� +|(0, τ]], P, dP × dt; Hγ +p (l2) +� +, +U α,γ +p +:= Lp +� +Ω, F0, H +γ− 2 +αp +p +� +. +We write u ∈ Hγ +p if u ∈ Hγ +p(τ) exists for any bounded stopping time τ. Additionally, if +γ = 0, then we use L instead of H, ∥f∥Lp(τ) := ∥f∥H0p(τ). The norm of each space is defined +naturally, for example, +∥f∥Hγ +p(τ) := +� +E +ˆ τ +0 +∥f(t, ·)∥p +Hγ +p dt +�1/p +. +Lemma 2.12 exhibits the relation between the stochastic and fractional integrals, which +is employed when Iα +t or Dα +t is applied to the stochastic part of the SPDEs. +Lemma 2.12. Let T < ∞ be a constant. 
+(i) Let α ≥ 0 and h ∈ L2(Ω × [0, T], P; l2). Then, the equality +Iα +� ∞ +� +k=1 +ˆ · +0 +hk(s)dwk +s +� +(t) = +∞ +� +k=1 +� +Iα +ˆ · +0 +hk(s)dwk +s +� +(t) +holds for all t ≤ T almost surely and in L2(Ω × [0, T]), where the series on both sides +converge in probability. +(ii) If α ≥ 0 and hn → h in L2(Ω × [0, T], P; l2) as n → ∞, then +∞ +� +k=1 +� +Iα +ˆ · +0 +hk +ndwk +s +� +(t) → +∞ +� +k=1 +� +Iα +ˆ · +0 +hkdwk +s +� +(t) +in probability uniformly on [0, T]. + +8 +BEOMSEOK HAN +(iii) If α > 1/2 and h ∈ L2(Ω×[0, T], P; l2), then +� +Iα �∞ +k=1 +´ · +0 hk(s)dwk +s +� +(t) is differentiable +in t and +∂ +∂t +� +Iα +∞ +� +k=1 +ˆ · +0 +hk(s)dwk +s +� +(t) = +1 +Γ(α) +∞ +� +k=1 +ˆ t +0 +(t − s)α−1hk(s)dwk +s +(a.e.) on Ω × [0, T]. +Proof. See Lemmas 3.1 and 3.3 of [7]. +□ +Fix a small κ0 > 0. For α ∈ (0, 1) and β < α + 1/2, set +c0 := (2β − 1)+ +α ++ κ01β=1/2. +(2.3) +Next, we introduce the solution spaces (for more detail, see Definitions 2.9 and 2.12 in +[25]). +Definition 2.13 (Solution spaces). Let τ ≤ T be a bounded stopping time, α ∈ (0, 1), +β < α + 1/2, γ ∈ R, and p ≥ 2. +(i) For u ∈ Hγ +p(τ), we write u ∈ Hγ +p(τ) if u0 ∈ U α,γ +p +, f ∈ Hγ−2 +p +(τ), and g ∈ Hγ−2+c0 +p +(τ, l2) +such that +∂α +t u(t, x) = f(t, x) + ∂β +t +ˆ t +0 +gk(s, x)dwk +s , +0 < t ≤ τ; +u(0, ·) = u0 +in the sense of distribution. In other words, for any φ ∈ S, the equality +(u(t, ·), φ) = (u0, φ) + Iα +t (f, φ) + Iα−β +t +∞ +� +k=1 +ˆ t +0 +(gk(s, ·), φ)dwk +s +(2.4) +holds for a.e. (ω, t) ∈ Ω × [0, τ]. If α − β ∈ (−1/2, 0), we regard Iα−β +t +as +∂ +∂tIα−β+1 +t +. +The norm in Hγ +p(τ) is defined as follows: +∥u∥Hγ +p(τ) := ∥u∥Hγ +p(τ) + ∥u0∥Uα,γ +p ++ inf +f,g +� +∥f∥Hγ−2 +p +(τ) + ∥g∥Hγ−2+c0 +p +(τ,l2) +� +. +(2.5) +(ii) We say u ∈ Hγ +p,loc(τ) if there exists a sequence τn ↑ τ such that u ∈ Hγ +p(τn) for +each n. We write u = v in Hγ +p,loc(τ) if a sequence of bounded stopping times τn ↑ τ +exists such that u = v in Hγ +p(τn) for each n. We omit τ if τ = ∞. In other words, +Hγ +p,loc = Hγ +p,loc(∞). +Remark 2.14. If α − β ≥ 0, the stochastic part of (2.4) is considered +Iα−β +t +∞ +� +k=1 +ˆ t +0 +(gk(s, ·), φ)dwk +s = +∞ +� +k=1 +Iα−β +t +ˆ t +0 +(gk(s, ·), φ)dwk +s . +Otherwise, if α − β ∈ (−1/2, 0), we regard Iα−β +t +as +∂ +∂tIα−β+1 +t +. Then, by Lemma 2.12 (iii), +the stochastic part of (2.4) is +Iα−β +t +� ∞ +� +k=1 +ˆ t +0 +(gk(s, ·), φ)dwk +s +� += ∂ +∂t +� +Iα−β+1 +∞ +� +k=1 +ˆ t +0 +(gk(s, ·), φ)dwk +s +� += +1 +Γ(α − β + 1) +∞ +� +k=1 +ˆ t +0 +(t − s)α−β+1(gk(s, ·), φ)dwk +s . + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +9 +Below, we provide the properties of the solution space Hγ +p(τ). +Theorem 2.15. Let τ ≤ T be a bounded stopping time. +(i) For ν ∈ R, the map (1 − ∆)ν/2 : Hγ+2 +p +(τ) → Hγ−ν+2 +p +(τ) is an isometry. +(ii) If γ ∈ R, α ∈ (0, 1), β < α + 1/2, and p ≥ 2, then Hγ +p(τ) is a Banach space with the +norm ∥ · ∥Hγ +p(τ). +Proof. The proof is a repeat of [25, Theorem 2.14] with τ instead of T. +□ +Next, we suggest the H¨older embedding theorems for u ∈ Hγ +p(τ). The proof of Theorem +2.16 is contained in Section 5. +Theorem 2.16. Let τ ≤ T be the bounded stopping time, γ ∈ R, α ∈ (0, 1), β < α + 1/2, +and +p > 2 ∨ 1 +α ∨ +1 +α − β + 1/2. +(2.6) +Suppose u ∈ Hγ +p(τ). +(i) Assume ν satisfies +1 +αp < ν < 1 − c0 +2 , +(2.7) +where c0 is the constant introduced in (2.3). Then, u ∈ C([0, τ]; Hγ−2ν +p +) almost surely +and +E sup +t≤τ +∥u(t, ·)∥p +Hγ−2ν +p +≤ N∥u∥p +Hγ +p(τ), +(2.8) +where N = N(α, β, γ, d, p, T). 
+(ii) Assume α, β, µ, and ν satisfy +1 +αp < µ < (α(ν + c0/2) − β) ∧ 1/2 + 1/2 +α +and +1 +αp < ν < 1 − c0 +2 , +(2.9) +where c0 is the constant introduced in (2.3). Then, for δ ∈ (0, T), u ∈ Cαµ−1/p([δ, τ]; Hγ−2ν +p +) +almost surely and +E∥u∥p +Cαµ−1/p([δ,τ];Hγ−2ν +p +) ≤ N∥u∥p +Hγ +p(τ), +(2.10) +where N = N(α, β, γ, δ, d, p, T). +Remark 2.17. Theorem 2.16 is consistent with the previous results ([26, Theorem 7.2]). +In other words, if we let α, β ↑ 1 in Theorem 2.16, conditions (2.6), (2.7), and (2.9), and +the results in (2.8) and (2.10) approach those of the case of α = β = 1. +Remark 2.18. As stated in Theorem 2.16 (ii), the H¨older regularity of solution in time is +given on [δ, T], where δ ∈ (0, T) (see Remark 5.6). Moreover, if u0 = 0, Theorem 2.16 (ii) +holds for δ = 0 (see Remark 5.8). +By combining Lemma 2.10 (ii) and Theorem 2.16, we have the H¨older embedding results +of solution space H(2−c0−d/2)∧1 +p +(τ) which is a preparation to obtain the maximum H¨older +regularity of solutions. +Corollary 2.19. Let τ ≤ T be a bounded stopping time, α ∈ (0, 1), β < α + 1/2, and +0 < γ < (2 − c0 − d/2) ∧ 1, where c0 is introduced in (2.3). Suppose p satisfies (2.6) and +u ∈ Hγ +p(τ). + +10 +BEOMSEOK HAN +(i) If α, β, γ, ν, d, and p satisfy (2.7) and +ν < 1 +2 +� +γ − d +p +� +, +(2.11) +then u ∈ C([0, τ]; Cγ−2ν−d/p) almost surely and +E sup +t≤τ +∥u(t, ·)∥p +Cγ−2ν−d/p(Rd) ≤ N∥u∥p +Hγ +p(τ), +where N = N(α, β, γ, d, p, T). +(ii) If α, β, γ, µ, ν, d and p satisfy (2.9) and (2.11), then for a small δ > 0, we have +u ∈ Cαµ−1/p([δ, τ]; Cγ−2ν−d/p) +almost surely and +E∥u∥p +Cαµ−1/p([δ,τ];Cγ−2ν−d/p(Rd)) ≤ N∥u∥p +Hγ +p(τ), +where N = N(α, β, γ, δ, d, p, T). +Proof. To demonstrate (i), we employ Lemma 2.10 (ii) and Theorem 2.16 (i). Then, we +have +E sup +t≤τ +∥u(t, ·)∥p +Cγ−2ν−d/p(Rd) ≤ NE sup +t≤τ +∥u(t, ·)∥p +Hγ−2ν +p +(Rd) ≤ N∥u∥p +Hγ +p(τ). +In the case of (ii), Lemma 2.10 (ii) and Theorem 2.16 (ii) imply +E∥u∥p +Cαµ−1/p([δ,τ];Cγ−2ν−d/p(Rd)) ≤ E∥u∥p +Cαµ−1/p([δ,τ];Hγ−2ν +p +(Rd)) ≤ N∥u∥p +Hγ +p(τ). +Thus, the corollary is proved. +□ +3. Main Results +This section presents the uniqueness, existence, Lp-regularity, and H¨older regularity of +the solution to the following equation: +∂α +t u = Lu + ¯biuuxi + ∂β +t +� +k +ˆ t +0 +σ(u)ηkdwk +s, +t > 0; +u(0, ·) = u0, +(3.1) +where Lu = aijuxixj + biuxi + cu. The coefficients aij, bi, and c are P × B(Rd)-measurable, +¯bi is P × B(Rd−1)-measurable, and aij, bi, c, and ¯bi (and their derivatives) are uniformly +bounded (see Assumption 3.1). Additionally, we assume the coefficient ¯bi is independent +of xi. Indeed, because ¯bi is independent of xi, we can employ the fundamental theorem +of calculus to control the nonlinear term ¯biuuxi (see Remark 3.2). Moreover, the diffusion +coefficient σ(u) is dominated by an Lp function h (see Assumption 3.3) and it is used to +obtain a uniform Lp bound of the local solutions (see Remark 3.4). +In Theorem 3.5, we obtain the existence and uniqueness of a solution in Hγ +p, where +γ ∈ (0, 2 − c0 − d/2) ∧ 1. The components of equation (3.1) affect the properties of the +solution u. For example, if α, β, d, and p are given, the regularity γ is determined. Remarks +3.7, 3.8, and 3.9 provide explanations for these relations. +Additionally, in Corollary 3.11, we have the maximal H¨older regularity of the solution by +employing the H¨older embedding theorem for solution spaces and the H¨older regularity of +the solution is given in (1.3). Observe that (1.3) derives from Corollary 2.19, and we have +the following. 
The constant δ can be taken as 0 if the initial data u0 = 0 (see Remarks 2.18 +and 3.6). Furthermore, depending on the range of β, the behavior of the H¨older regularity + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +11 +of the solution in time varies. In detail, when β ≥ 1/2, then the H¨older regularity of the +solution in space is α/2 times of the H¨older regularity of the solution in time. Moreover, +if we consider the case α, β ↑ 1, then the H¨older regularity in time and space approaches +1/4 and 1/2, which are the results of the SPDEs driven by space-time white noise (e.g. [26, +Remark 8.7] or [16, Corollary 3.1]). In the case of β < 1/2, 1/2− β of the H¨older regularity +in time is obtained due to the regularity of the stochastic integral (Remark 3.12). +The following are assumptions on coefficients. +Assumption 3.1. +(i) The coefficients aij = aij(t, x), bi = bi(t, x), and c = c(t, x) are +P × B(Rd)-measurable. +(ii) The coefficient ¯bi(t, ¯xi) = ¯bi(t, x1, . . . , xi−1, xi+1, . . . , xd) is P × B(Rd−1)-measurable. +(iii) There exists K > 0 such that +K−1|ξ|2 ≤ aij(t, x)ξiξj ≤ K|ξ|2 +for all +(ω, t, x) ∈ Ω × [0, ∞) × Rd, +ξ ∈ Rd, +(3.2) +and +� +i,j +��aij(t, ·) +�� +C2(Rd) + +� +i +��bi(t, ·) +�� +C2(Rd) + |c(t, ·)|C2(Rd) + +� +i +��¯bi(t, ·) +�� +C2(Rd−1) ≤ K +(3.3) +for all (ω, t) ∈ Ω × [0, ∞). +Remark 3.2. To prove the existence of a global solution, we need to acquire a uniform Lp +bound of the local solutions. Thus, we separate the local solutions into two parts: noise- +dominating and nonlinear-dominating parts. +In this remark, we consider the nonlinear- +dominating parts related to ¯biuuxi. +If coefficient ¯bi is independent of xi, coefficient ¯bi can be taken out of the integral for +xi. Then, by the fundamental theorem of calculus to xi, the nonlinear term ¯biuuxi is elimi- +nated in the Lp estimate of the nonlinear-dominating part of the local solutions. Thus, the +nonlinear-dominating parts are controlled by the initial data and diffusion coefficient σ(u) +(for more information, see Lemma 4.8). +To introduce the assumptions on the diffusion coefficient, we may assume p ≥ 2. +Assumption 3.3 (p). +(i) The coefficient σ(t, x, u) is P × B(Rd) × B(R)-measurable. +(ii) There exists a constant K such that +|σ(t, x, u) − σ(t, x, v)| ≤ K|u − v| +for all +(ω, t, x) ∈ Ω × [0, ∞) × Rd, +u, v ∈ R. +(iii) There exists a P × B(Rd)-measurable function h ∈ Lp such that +|σ(t, x, u)| ≤ |h(t, x)| +for all +(ω, t, x) ∈ Ω × [0, ∞) × Rd, +u ∈ R. +(3.4) +Remark 3.4. As mentioned in Remark 3.2, we divide the local solutions into two parts, and +the nonlinear-dominating parts are controlled by the initial data u0 and diffusion coefficients +σ(u). Then, to deal with the noise-dominating term and the terms including σ(u), we employ +the function h(t, x) introduced in Assumption 3.3 (p) (iii). Indeed, the terms related to the +diffusion coefficient σ(u) are controlled by the initial data and h so that a uniform Lp bound +of u is obtained (see Lemmas 4.5 and 4.8). +Next, we introduce the main results. + +12 +BEOMSEOK HAN +Theorem 3.5. Let +α ∈ (0, 1), +β < 3 +4α + 1 +2, +d < 4 − 2c0, +0 < γ < (2 − c0 − d/2) ∧ 1 +(3.5) +and +p = 2k +for some +k ∈ N +and +p > 2 ∨ 1 +α ∨ +1 +α − β + 1/2 ∨ 2 + αd +αγ +∨ +d +1 − γ , +(3.6) +where c0 are the constants introduced in (2.3). Suppose Assumptions 3.1 and 3.3 (p) hold. +If u0 ∈ U α,γ +p +, then there exists a unique solution u ∈ Hγ +p,loc satisfying (3.1). 
Furthermore, +for ν satisfying (2.7) and (2.11), and for any T ∈ (0, ∞) and bounded stopping time τ ≤ T, +we have +u ∈ C([0, τ]; Cγ−2ν−d/p) +and +sup +t≤τ +∥u(t, ·)∥Cγ−2ν−d/p < ∞ +(3.7) +almost surely. Additionally, for µ and ν satisfying (2.9) and (2.11), and for any T ∈ (0, ∞), +bounded stopping time τ ≤ T, and small δ > 0, we have +u ∈ Cαµ−1/p([δ, τ]; Cγ−2ν−d/p) +and +∥u∥ +Cαµ− 1 +p ([δ,τ];Cγ−2ν−d/p) < ∞ +(3.8) +almost surely. If initial data u0 = 0, (3.8) holds with δ = 0. +Proof. See Proof of Theorem 3.5 in Section 4. +□ +Remark 3.6. If the initial data u0 = 0, we can consider the case δ = 0 because we employ +Theorem 2.16 to obtain (3.8) (see Theorem 2.16 and Remark 2.18). +Remark 3.7. +(i) We assume +α ∈ (0, 1) +because an inequality acting like the chain rule is employed to deal with the nonlinear- +dominating part of the local solution (see Lemma 4.6). +(ii) The conditions +β < 3α/4 + 1/2 +and +d < 4 − 2c0 +are expected to obtain the uniqueness and existence of solutions to SPDEs with Caputo +time fractional derivatives and space-time white noise even for the semilinear case. For +example, see [23, Section 7]. Additionally, observe that the choice of α and β allows +d = 1, 2, 3, where c0 is the constant introduced in (2.3). +Remark 3.8. +(i) For the existence and uniqueness of local solutions, we impose +γ ∈ (0, 2 − c0 − d/2). +(3.9) +Heuristically, if u is a measurable, continuous, and bounded solution to equation (3.1), +then for given T < ∞, we can define a bounded stopping time as follows: +τm := inf +� +t ≥ 0 : sup +x∈Rd |u(t, x)| ≥ m +� +∧ T. +Then, the solution u satisfies the localized version of equation (3.1) on (0, τm). In +other words, +∂α +t u = Lu + 1 +2 +¯bi � +(|u| ∧ m)2� +xi + ∂β +t +� +k +ˆ t +0 +σ(u)ηkdwk +s +(3.10) + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +13 +holds on 0 < t < τm with u(0, ·) = u0. Then, as (3.10) is a semilinear equation, (3.9) +has to be satisfied by [23, Theorem 7.1] (for more detail, see [23, Section 7] and [25, +Section 5]. +(ii) The following condition +γ ∈ (0, 1) +(3.11) +is assumed due to the nonlinear term ¯biuuxi lowering the regularity of the solution. +Even for SBEs (α = β = 1), the condition in (3.11) is required (for more information, +see [13, 14, 15, 16, 29]). +Remark 3.9. +(i) To obtain the local solution, we employ the Lp theory for the semilinear +equation (see [26, Theorem 5.1]). When we control the nonlinear term ¯biuuxi in the +Lp estimate, the kernel of (1 − ∆)− γ−1 +2 +has to be controlled. Hence, +p > +d +1 − γ +is imposed (see Lemma 4.3). +(ii) We require R-valued continuous solutions to consider the cut-off version of equation +(3.1). Therefore, we assume +p > 2 ∧ 1 +α ∧ +1 +α − β + 1/2 ∧ 2 + αd +αγ +which is required to apply the H¨older embedding theorem for Hγ +p (see Theorem 2.16 +and Corollary 2.19). +(iii) As mentioned in Remark 3.7 (i), we employ an inequality similar to the chain rule. +To apply (4.13) instead of chain rule for the Caputo fractional derivative, we assume +p = 2k +for some k ∈ N. +To achieve the maximal H¨older regularity, we require the uniqueness of the solution in p. +Theorem 3.10. Suppose all the conditions of Theorem 3.5 hold. Let u ∈ Hγ +p,loc be the +solution of equation (3.1) introduced in Theorem 3.5. If q > p, u0 ∈ U α,γ +q +, and Assumption +3.3 (q) hold, then u ∈ Hγ +q,loc. +Proof. See Proof of Theorem 3.10 in Section 4. +□ +Finally, we obtain the maximal H¨older regularity of the solution by combining Theorems +3.5 and 3.10. Recall that c0 is introduced in (2.3). 
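+For illustration (a heuristic worked instance only, derived from (2.3) and (3.5), not part of the
+statements below), the constraints in (3.5) interact with the constant c0 as follows. If β < 1/2,
+then (2β − 1)+ = 0 and hence c0 = 0; the dimension restriction d < 4 − 2c0 admits d ∈ {1, 2, 3},
+and the admissible spatial regularity is γ < (2 − d/2) ∧ 1, that is, γ < 1 for d = 1, 2 and
+γ < 1/2 for d = 3. In the limiting regime α, β ↑ 1, we have
+c0 = (2β − 1)+/α ↑ 1,
+d < 4 − 2c0 ↓ 2,
+γ < (2 − c0 − d/2) ∧ 1 ↓ 1/2,
+so only d = 1 remains and the spatial regularity bound approaches 1/2, which is consistent with
+the classical SBEs driven by space-time white noise discussed in Remark 3.12.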
+Corollary 3.11. Suppose α, β, d, and γ satisfy (3.5), and u0 ∈ ∩p>2U α,(2−c0−d/2)∧1 +p +, and +h ∈ ∩p>2Lp satisfies (3.4). Then, for T > 0, (1.3) holds almost surely. +Proof. When α, β, d, and γ are given in (3.5), we choose p as in (3.6). For each p, there +exists a unique solution up ∈ Hγ +p,loc to equation (3.1). Due to Theorem 3.10, up ∈ Hγ +q,loc for +any q ≥ p so that we write u instead of up and u is independent of p. Thus, by letting p +large in (3.7) and (3.8), we have (1.3). Thus, the corollary is proved. +□ +Remark 3.12. +(i) If 1/2 ≤ β < α + 1/2, the H¨older regularity in space is α/2 times that +in time. Furthermore, we can recover the H¨older regularity results of SBEs (α = β = 1) +by considering the case α, β ↑ 1. We cite [29, Proposition 5.1] or [16, Corollary 3.1] +for reader’s convenience. + +14 +BEOMSEOK HAN +(ii) If β < 1/2, then the H¨older regularity in time obtains additional regularity by as much +as 1/2 − β. This phenomenon is caused by the stochastic integral of equation (3.1) +adding the H¨older regularity of noise in time almost 1/2, and ∂β +t reducing the regularity +of the noise by β. +4. Proof of Theorems 3.5 and 3.10 +We assume that all conditions in Theorem 3.5 hold for the remainder of this section. +To establish the existence of a global solution, we need to obtain the uniqueness and +existence of local solutions (Lemma 4.3). With these local solutions, we build a candidate +for a global solution. More precisely, we paste the local solutions and demonstrate that the +local existence time explodes almost surely (Lemma 4.9). To prove that the local existence +time explodes almost surely, we demonstrate that a uniform Lp bound of local solutions +exists. In detail, we separate the local solution into noise- and nonlinear-dominating parts. +The noise-dominating part is affected by the stochastic part of the equation, and the other +part is influenced by the nonlinear term biuuxi. When we deal with the noise-dominating +part of the solution, the dominating function of the diffusion coefficient provides a uniform +Lp bound for the noise-dominating part of the local solutions (see Assumption 3.3 (p) (iii) +and Lemma 4.5). The other part is controlled by employing a version of the chain rule and +Gr¨onwall inequality (see Lemmas 4.6 and 4.8 and Theorem 4.7). +First, we introduce the uniqueness and existence theorem for semilinear SPDEs. +Assumption 4.1 (τ). +(i) The functions f(t, x, u) and gk(t, x, u) are P × B(Rd) × B(R)- +measurable functions satisfying the following: +f(t, x, 0) ∈ Hγ +p(τ) +and +g(t, x, 0) = (g1(t, x, 0), g2(t, x, 0), . . . ) ∈ Hγ+1 +p +(τ, l2). +(ii) For any ε > 0, there exists a constant Nε such that for any u, v ∈ Hγ +p(τ), +∥f(u) − f(v)∥p +Hγ−2 +p +(τ) + ∥g(u) − g(v)∥p +Hγ−2+c0 +p +(τ,l2) ≤ ε∥u − v∥p +Hγ +p(τ) + Nε∥u − v∥p +Hγ−2 +p +(τ), +where c0 is the constant introduced in (2.3). +Lemma 4.2. Let τ ≤ T be a bounded stopping time. Suppose Assumption 4.1 (τ) hold. +Then, for initial data u0 ∈ U α,γ +p +, the following equation: +∂α +t u = Lu + f(u) + ∂β +t +ˆ t +0 +gk(u)dwk +t , +0 < t ≤ τ; +u(0, ·) = u0 +(4.1) +has a unique solution u ∈ Hγ +p(τ). Moreover, +∥u∥p +Hγ +p(τ) ≤ N +� +∥u0∥p +Uα,γ +p ++ ∥f(0)∥p +Hγ−2 +p +(τ) + ∥g(0)∥p +Hγ−2+c0 +p +(τ,l2) +� +, +(4.2) +where N = N(α, β, γ, d, p, K, T) and c0 is the constant introduced in (2.3). +Proof. Theorem 5.1 of [26] is the motivation of the proof. The case τ ≡ T is obtained by +[25, Theorem 2.18]; thus, we only consider the case τ ≤ T. +(Step 1). 
(Existence) Set +¯f(t, u) := 1t≤τf(t, u) +and +¯g(t, u) := 1t≤τg(t, u). +Additionally, ¯f(u) and ¯g(u) satisfy Assumption 4.1 (T). Then, by [25, Theorem 2.18], there +exists a unique solution u ∈ Hγ +p(T) such that u satisfies equation (4.1) with ¯f and ¯g, instead + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +15 +of f and g, respectively. As τ ≤ T, we have u ∈ Hγ +p(τ) and u satisfies equation (4.1) and +estimate (4.2) with f and g. +(Step 2). (Uniqueness) Let u, v ∈ Hγ +p(τ) be two solutions of equation (4.1). Then, [25, +Theorem 2.18] yields there exists a unique solution ¯v ∈ Hγ +p(T) satisfying +∂α +t ¯v = L¯v + ¯f(v) + +∞ +� +k=1 +∂β +t +ˆ t +0 +¯gk(v)dwk +t , +0 < t ≤ T ; +¯v(0, ·) = u0. +(4.3) +Notice that in (4.3), ¯f(v) and ¯g(v) are used instead of ¯f(¯v) and ¯g(¯v), respectively. Set +˜v := v − ¯v. Then, for fixed ω ∈ Ω, we have +∂α +t ˜v = L˜v, +0 < t ≤ τ ; +˜v(0, ·) = 0. +By the deterministic version of [25, Theorem 2.18], we have ˜v = 0 in Lp((0, τ] × Rd) almost +surely. Additionally, it implies v(t, ·) = ¯v(t, ·) in Lp((0, τ] × Rd) almost surely. Thus, in +equation (4.3), we can replace ¯f(v) and ¯g(v) with ¯f(¯v) and ¯g(¯v). Therefore, ¯v ∈ Hγ +p(T) +satisfies equation (4.1) on (0, T] with ¯f, ¯g instead of f, g, respectively. Similarly, by following +word for word, there exists ¯u ∈ Hγ +p(T) such that ¯u satisfies equation (4.1) on (0, T] with +¯f and ¯g instead of f and g. Thus, by the uniqueness result in Hγ +p(T), we have ¯u = ¯v in +Hγ +p(T), which implies u = v in Hγ +p(τ). Thus, the lemma is proved. +□ +Next, we provide the uniqueness and existence of a local solution to equation (3.1). As an +auxiliary function, we choose ρ(·) ∈ C∞ +c (R) such that ρ(z) ≥ 0 on z ∈ (−∞, ∞), ρ(z) = 1 +on |z| ≤ 1, ρ(z) = 0 on |z| ≥ 2, and +d +dzρ(z) ≤ 0 on z ≥ 0. We define the following: +ρm(z) := ρ(z/m). +(4.4) +Lemma 4.3. Let τ ≤ T be a bounded stopping time. For m ∈ N, there exists um ∈ Hγ +p(τ) +such that +∂α +t um = Lum + ¯bi � +u2 +mρm(um) +� +xi + ∂β +t +ˆ t +0 +σ(t, x, um)ηk(x)dwk +t , 0 < t ≤ τ; um(0, ·) = u0, +where ρm is the function introduced in (4.4). Furthermore, um ∈ C([0, τ]; C(Rd)) almost +surely and +E sup +t≤τ +sup +x∈Rd |um(t, x)|p ≤ N∥um∥p +Hγ +p(τ) < ∞ +(4.5) +almost surely. +Proof. Due to Lemma 4.2 and Corollary 2.19, it suffices to show that Assumption 4.1 (τ) +holds. Because σ(t, x, 0) ≤ h(t, x) for all ω, t, x and h ∈ Lp, Assumption 4.1 (i) is satisfied. +In the case of Assumption 4.1 (ii), notice that for u, v ∈ R, we have +��u2ρm(u) − v2ρm(v) +�� ≤ Nm|u − v|. +Then, for u, v ∈ Hγ +p(τ), by Remark 2.8 and Lemmas 2.10 (viii) and (iii), we have +��¯bi � +(u(t, ·))2ρm(u(t, ·)) − (v(t, ·))2ρm(v(t, ·)) +� +xi +��p +Hγ−2 +p +≤ N +��(u(t, ·))2ρm(u(t, ·)) − (v(t, ·))2ρm(v(t, ·)) +��p +Hγ−1 +p +≤ N +ˆ +Rd +�ˆ +Rd |R1−γ(x − y)| +� +(u(·))2ρm(·, u(·)) − (v(·))2ρm(·, v(·)) +� +(t, y)dy +�p +dx +≤ Nm +�ˆ +Rd |R1−γ(x)|dx +�p ˆ +Rd |u(t, x) − v(t, x)|pdx +(4.6) + +16 +BEOMSEOK HAN +and +∥σ(u)η − σ(v)η∥p +Hγ−2+c0 +p +(l2) +≤ +ˆ +Rd +�� +k +�ˆ +Rd |R−γ+2−c0(x − y)| (σ(·, u(·)) − σ(·, v(·)))(t, y)ηk(y)dy +�2�p/2 +dx +≤ +ˆ +Rd +�ˆ +Rd |R−γ+2−c0(x − y)|2 (σ(t, y, u(t, y)) − σ(t, y, v(t, y)))2 dy +�p/2 +dx +≤ Kp +ˆ +Rd +�ˆ +Rd |R−γ+2−c0(y)|2 (u(t, x − y) − v(t, x − y))2dy +�p/2 +dx +≤ Kp +�ˆ +Rd |R−γ+2−c0(y)|2 dy +�p/2 ˆ +Rd |u(t, x) − v(t, x)|pdx +(4.7) +on almost every (ω, t) ∈ |(0, τ]]. Due to Remark 2.8, we have +ˆ +Rd |R1−γ(y)| dy + +ˆ +Rd |R−γ+2−c0(y)|2 dy < ∞. 
+By integrating with respect to (ω, t) to (4.6) and (4.7), employing Lemma 2.10 (vii), and +Young’s inequality, we have +��¯bi � +u2ρm(u) − v2ρm(v) +� +xi +��p +Hγ−2 +p +(τ) + ∥σ(u)η − σ(v)η∥p +Hγ−2+c0 +p +(τ,l2) +≤ Nm∥u − v∥p +Lp(τ) +≤ ε∥u − v∥p +Hγ +p(τ) + Nm∥u − v∥p +Hγ−2 +p +(τ). +(4.8) +The lemma is proved. +□ +Remark 4.4. We introduce a candidate for a global solution. Let T < ∞. For m ∈ N, let +um ∈ Hγ +p(T) be the solution introduced in Lemma 4.3. Then, for R ∈ {1, 2, . . . , m}, define +a stopping time τ R +m +τ R +m := inf +� +t ≥ 0 : sup +x∈R +|um(t, x)| ≥ R +� +∧ T. +(4.9) +Observe that +τ R +R ≤ τ m +m +(4.10) +Indeed, if R = m, (4.10) is obvious. If R < m, we have um ∧ m = um ∧ m ∧ R = um ∧ R +for t ≤ τ R +m. Therefore, um and uR are solutions to equation +∂α +t u = Lu + ¯bi � +u1+λρR(u) +� +xi + σ(u)ηkdwk +t , +0 < t ≤ τ R +m ; +u(0, ·) = u0. +In contrast, uR ∧ R = uR ∧ R ∧ m = uR ∧ m for t ≤ τ R +R . Thus, um and uR are solutions to +equation +∂α +t u = Lu + ¯b +� +u1+λρm(u) +� +xi + σ(u)ηkdwk +t , +0 < t ≤ τ R +R ; +u(0, ·) = u0. +Observe that the uniqueness and continuity results in Lemma 4.3 yields that um = uR for +all t ≤ (τ R +m ∨ τ R +R ). Therefore, for t ≤ τ R +m, +sup +s≤t +sup +x∈R +|uR(s, x)| = sup +s≤t +sup +x∈R +|um(s, x)| ≤ R, + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +17 +and this implies τ R +m ≤ τ R +R . Similarly, τ R +m ≥ τ R +R ; thus, +τ R +R = τ R +m +almost surely. Moreover, we have τ R +m ≤ τ m +m because m > R. Therefore, we have (4.10). +Further, we define +u(t, x) := um(t, x) +on +t ≤ τ m +m +and set +τ∞ := lim sup +m→∞ lim sup +T→∞ +τ m +m . +(4.11) +It should be remarked that u(t, x) is well-defined on Ω × [0, ∞) × Rd and the nontrivial +domain of u is Ω × [0, τ∞) × Rd. +To obtain a uniform Lp bound of the local solution um, we separate um into noise- and +nonlinear-dominating parts. Lemma 4.5 provides the existence, uniqueness, and estimate +of noise-dominating parts of um. +Lemma 4.5. Let T < ∞. Then, there exists v ∈ Hγ +p(T) such that +∂α +t v = Lv + ∂β +t +ˆ t +0 +σ(s, x, u)ηk(x)dwk +s, +0 < t ≤ T, +u(0, ·) = u0 +Furthermore, v ∈ C([0, T]; C(Rd)) almost surely, and +E sup +t≤T +sup +x∈Rd |v(t, x)|p + E sup +t≤T +∥v(t, ·)∥p +Lp ≤ N∥v∥p +Hγ +p(T) ≤ N∥u0∥p +Uα,γ +p ++ N∥h∥p +Lp(τ), +where N = N(α, β, γ, d, p, K, T). +Proof. Similar to the proof of Lemma 4.3, it is enough to show that Assumption 4.1 (τ) +holds. Set η = (η1, η2, . . . ). Then, by Remark 2.8, for t ≤ T +∥σ(t, ·, u(t, ·))η∥p +Hγ−2+c0 +p +(l2) += +ˆ +Rd +� ∞ +� +k=1 +�ˆ +Rd R−γ+2−c0(x − y)σ(t, y, u(t, y))ηk(y)dy +�2�p/2 +dx += +�ˆ +Rd |R−γ+2−c0(x)|2dx +�p/2 ˆ +Rd |σ(t, y, u(t, y))|pdy +≤ +�ˆ +Rd |R−γ+2−c0(x)|2dx +�p/2 ˆ +Rd |h(t, y)|pdy +≤ N∥h(t, ·)∥p +Lp. +(4.12) +Therefore, +∥σ(u)η∥p +Hγ−2+c0 +p +(T,l2) ≤ E +ˆ T +0 +∥σ(t, ·, u(t, ·))η∥p +Hγ−2+c0 +p +(l2)dt ≤ N∥h∥p +Lp. +Thus, the lemma is proved by Lemma 4.2. +□ +Next, we control the nonlinear-dominating parts of the local solutions. The following two +lemmas are crucial in obtaining uniform Lp bounds. Lemma 4.6 functions as a chain rule, +and Theorem 4.7 is a version of the Gr¨onwall inequality. + +18 +BEOMSEOK HAN +Lemma 4.6. Suppose α ∈ (0, 1) and k ∈ N. For any ψ ∈ C∞ +c ((0, ∞) × Rd), we have +∂α +t (ψ(·, x))2k(t) ≤ 2kψ(t, x)|ψ(t, x)|2k−2∂α +t ψ(t, x), +(4.13) +for all (t, x) ∈ (0, ∞) × Rd. +Proof. We employ the mathematical induction. The results and proof are motivated by +(4.2) of [8]. +(Step 1). First, we consider the case k = 1. 
Although the proof is in the proof of [8, +Proposition 4.1], we include the proof for the completeness of this paper. +Let ψ ∈ C∞ +c ((0, ∞) × Rd) and t ∈ (0, ∞) and x ∈ Rd. For s ∈ (0, t], set +F1(s) := 1 +2|ψ(s, x)|2, +F2(s) := ψ(s, x)ψ(t, x), +and +F(s) := 1 +2 +� +|ψ(s, x)|2 − |ψ(t, x)|2� +− (ψ(s, x) − ψ(t, x))ψ(t, x). +Further, +F(s) = 1 +2|ψ(s, x) − ψ(t, x)|2 ≥ 0 +on s ≤ t, and the equality holds for s = t. Notice that the integration by parts implies that +ˆ t +0 +(t − s)−α(F ′ +1(s) − F ′ +2(s))ds = +ˆ t +0 +(t − s)−αF ′(s)ds ≤ 0. +Then, by the definition of ∂α +t (Definition 2.5), we have (4.13) with k = 1. +(Step 2). +Let n ∈ N and assume that the results hold for k = 1, 2, . . . , n − 1. +Set +˜ψ(t, x) := (ψ(t, x))2. Since ˜ψ(t, x) ∈ C∞ +c ((0, ∞) × Rd), we have +∂α +t (ψ(·, x))2n(t) = ∂α +t ( ˜ψ(·, x))2n−1(t) +≤ 2n−1 ˜ψ(t, x) +��� ˜ψ(t, x) +��� +2n−1−2 +∂α +t ˜ψ(t, x) += 2n−1 |ψ(t, x)|2n−2 ∂α +t (ψ(t, x))2 +≤ 2nψ(t, x) |ψ(t, x)|2n−2 ∂α +t ψ(t, x). +The lemma is proved. +□ +Theorem 4.7 (Theorem 8 of [4]). Let ψ(t) be a nonnegative integrable function on [0, T]. +For a constant N1, if the function ψ satisfies +ψ(t) ≤ ψ0 + N1Iα +t ψ +on t ∈ [0, T], then +ψ(t) ≤ +� +1 + +∞ +� +k=0 +N k +1 +Γ(kα) +(Γ(α)tα)k +kα +� +ψ0 +on t ∈ [0, T]. +We consider following lemma to control the remainder of the local solution um. + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +19 +Lemma 4.8. Let um ∈ Hγ +p(T) and v ∈ Hγ +p(T) be functions introduced in Lemmas 4.3 and +4.5, and τ m +m be the stopping time introduced in (4.9). Then, +∥um(t, ·) − v(t, ·)∥p +Lp +≤ N sup +t≤T +sup +x∈Rd |v(t, x)|p sup +t≤T +∥v(s, ·)∥p +Lp +� +1 + +∞ +� +k=0 +� +1 + sups≤t,x∈Rd |v(s, x)|2�k +Γ(kα) +(Γ(α)T α)k +kα +� +for all t ≤ τ m +m almost surely, where N = N(p, K). +Proof. Set +wm := um − v +and +fm := Lwm + ¯bi((um)2ρm(um))xi. +Then, we have fm ∈ Hγ−2 +p +(T) since wm, um ∈ Hγ +p(T) and estimates similar to (4.8). Ad- +ditionally, ∂α +t wm = fm. +Let (ω, t) ∈ +|(0, τ m +m ]]. +Due to [23, Remark 2.9], there exists +wn +m ∈ C∞ +c ((0, ∞) × Rd) such that wn +m → wm in Lp((0, t); Hγ +p ), and ∂α +t wn +m is a Cauchy +in Lp((0, t); Hγ−2 +p +). Define +f n +m := ∂α +t wn +m. +Moreover, fm is the limit of f n +m as n → ∞ in Lp((0, t); Hγ−2 +p +) (see [23, Remark 2.9]). +Choose a nonnegative function ζ ∈ C∞ +c (Rd) with a unit integral and set ζε(x) := ε−dζ(x/ε) +for ε > 0. For h ∈ L1,loc(Rd), set h(ε)(x) := +´ +Rd h(y)ζε(x − y)dy. +Next, let ε > 0 and x ∈ Rd. Since wn(ε) +m +∈ C∞ +c ((0, ∞) × Rd) and p = 2k, Lemma 4.6 +yields +1 +p∂α +t +� +wn(ε) +m +(·, x) +�p +(t) ≤ f n(ε) +m +(t, x)wn(ε) +m +(t, x) +���wn(ε) +m +(t, x) +��� +p−2 +(4.14) +on t ∈ (0, ∞). Additionally, as wn +m(0, x) = 0, we have +wn(ε) +m +(0, x) = 0. +(4.15) +Thus, if we take stochastic integral Iα +t on both sides of (4.14), we have +1 +p +���wn(ε) +m +(t, x) +��� +p +≤ Iα +t +� +f n(ε) +m +(·, x)wn(ε) +m +(·, x) +���wn(ε) +m +(·, x) +��� +p−2� +(4.16) +due to +� +wn(ε) +m +�p +∈ C∞ +c ((0, ∞)×Rd), (4.15), and Remark 2.6. 
Observe that (2.1) with q = ∞ +and the H¨older inequality imply that +����Iα +· +� +f n(ε) +m +(·, x)wn(ε) +m +(·, x) +���wn(ε) +m +(·, x) +��� +p−2 +− f (ε) +m (·, x)w(ε) +m (·, x) +���w(ε) +m (·, x) +��� +p−2����� +L1((0,t)) +≤ +ˆ t +0 +����f n(ε) +m +(s, x)wn(ε) +m +(s, x) +���wn(ε) +m +(s, x) +��� +p−2 +− f (ε) +m (s, x)w(ε) +m (s, x) +���w(ε) +m (s, x) +��� +p−2���� ds +≤ N +ˆ t +0 +���f n(ε) +m +(s, x) − f (ε) +m (s, x) +��� +���wn(ε) +m +(s, x) +��� +p−1 +ds ++ N +ˆ t +0 +����f (ε) +m (s, x) +� +wn(ε) +m +(s, x) +���wn(ε) +m +(s, x) +��� +p−2 +− w(ε) +m (s, x) +���w(ε) +m (s, x) +��� +p−2����� ds +≤ N +� +An +���wn(ε) +m +(·, x) +��� +2 +Lp(0,t) + BnCn +���f (ε) +m (·, x) +��� +Lp(0,t) +� ���wn(ε) +m +(·, x) +��� +p−3 +Lp(0,t) , +(4.17) + +20 +BEOMSEOK HAN +where +An = +���f n(ε) +m +(·, x) − f (ε) +m (·, x) +��� +Lp(0,t) +Bn = +���wn(ε) +m +(·, x) − w(ε) +m (·, x) +��� +Lp(0,t) , +and +Cn = +���wn(ε) +m +(·, x) +��� +Lp(0,t) + +���w(ε) +m (·, x) +��� +Lp(0,t) . +Moreover, +An, Bn → 0 +and +Cn → 2 +���w(ε) +m (·, x) +��� +Lp(0,t) +as +n → ∞ +(4.18) +since wn +m → wm and f n +m → fm in Lp((0, t); Hγ +p ). Then, by applying (4.18) to (4.17), we +have +����Iα +· +� +f n(ε) +m +(·, x)wn(ε) +m +(·, x) +���wn(ε) +m +(·, x) +��� +p−2 +− f (ε) +m (·, x)w(ε) +m (·, x) +���w(ε) +m (·, x) +��� +p−2����� +L1((0,t)) +→ 0 +as n → ∞. Therefore, there exists a sequence nl such that wnl(ε) +m +(·, x) → w(ε) +m (·, x) and +Iα +· +� +f nl(ε) +m +wnl(ε) +m +���wnl(ε) +m +��� +p−2� +→ Iα +· +� +f (ε) +m w(ε) +m +���w(ε) +m +��� +p−2� +almost everywhere on [0, t]. Further- +more, the convergence holds everywhere on [0, t] due to the continuity in t. +Then, by +considering sequence nl instead of n and letting l → ∞ for (4.16), we have +1 +p +���w(ε) +m (t, x) +��� +p +≤ Iα +t +� +f (ε) +m (·, x)w(ε) +m (·, x) +���w(ε) +m (·, x) +��� +p−2� +. +Since t ≤ τ m +m , ρm(um) = 1. By integrating with respect to x, we have +Γ(α) +p +ˆ +Rd +���w(ε) +m (t, x) +��� +p +dx +≤ +ˆ t +0 +(t − s)α−1 +ˆ +Rd(Lwm(s, ·))(ε)(x)w(ε) +m (s, x) +���w(ε) +m (s, x) +��� +p−2 +dxds ++ +ˆ t +0 +(t − s)α−1 +ˆ +Rd +� +¯bi(s, ¯xi) +� +|wm(s, ·) + v(s, ·)|2�(ε) +xi (x) +� +w(ε) +m (s, x) +���w(ε) +m (s, x) +��� +p−2 +dxds. +(4.19) +Furthermore, by integration by parts, we obtain +ˆ +Rd +� +(Lwm(s, ·))(ε)(x) + ¯bi � +|wm(s, ·) + v(s, ·)|2�(ε) +xi (x) +� +w(ε) +m (s, x) +���w(ε) +m (s, x) +��� +p−2 +dx +≤ −(p − 1) +ˆ +Rd +� +aijwm +�(ε) +xj (s, x) +���w(ε) +m (s, x) +��� +p−2 +w(ε) +mxi(s, x)dx ++ (p − 1) +ˆ +Rd +�� +2aij +xj − bi� +wm +�(ε) +(s, x) +���w(ε) +m (s, x) +��� +p−2 +w(ε) +mxi(s, x)dx ++ +ˆ +Rd +�� +aij +xixj − bi +xi + c +� +wm +�(ε) +(s, x)w(ε) +m (s, x) +���w(ε) +m (s, x) +��� +p−2 +dx +− (p − 1) +ˆ +Rd +¯bi(s, ¯xi) +� +(wm(s, ·) + v(s, ·))2�(ε) (x) +���w(ε) +m (s, x) +��� +p−2 +w(ε) +mxi(s, x)dx. +(4.20) + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +21 +Additionally, observe that +��� +� +aijwm +�(ε) +xj (s, x) − aij(s, x)w(ε) +mxj(s, x) +��� += ε−1 +���� +ˆ +Rd +� +aij(s, x − εy) − aij(s, x) +� +wm(s, x − εy)ζyj(y)dy +���� +≤ N(K) +ˆ +Rd |wm(s, x − εy)||y||ζy(y)|dy, +(4.21) +and by (3.2), +− +ˆ +Rd aij(s, x)w(ε) +mxi(s, x)w(ε) +mxj(s, x) +���w(ε) +m (s, x) +��� +p−2 +dx +≤ −K−1 +ˆ +Rd +���w(ε) +m (s, x) +��� +p−2 ���w(ε) +mx(s, x) +��� +2 +dx. 
+(4.22) +Thus, by combining (4.21) and (4.22) +− +ˆ +Rd +� +aijwm +�(ε) +xj (s, x) +���w(ε) +m (s, x) +��� +p−2 +w(ε) +mxi(s, x)dx += − +ˆ +Rd +���w(ε) +m (s, x) +��� +p−2 +w(ε) +mxi(s, x) +� +(awm)(ε) +xj (s, x) − a(s, x)w(ε) +mxj(s, x) +� +dx +− +ˆ +Rd aij(s, x)w(ε) +mxi(s, x)w(ε) +mxj(s, x) +���w(ε) +m (s, x) +��� +p−2 +dx +≤ N +ˆ +Rd +���w(ε) +m (s, x) +��� +p−2 ���w(ε) +mxi(s, x) +��� +ˆ +Rd |wm(s, x − εy)||y||ζy(y)|dydx +− K−1 +ˆ +Rd +���w(ε) +m (s, x) +��� +p−2 ���w(ε) +mx(s, x) +��� +2 +dx +≤ N +ˆ +Rd +���w(ε) +m (s, x) +��� +p−2 �ˆ +Rd |wm(s, x − εy)||y||ζy(y)|dy +�2 +dx +− 1 +2K−1 +ˆ +Rd +���w(ε) +m (s, x) +��� +p−2 ���w(ε) +mx(s, x) +��� +2 +dx, +(4.23) +where N = N(K). Moreover, +���� +�� +2aij +xj − bi� +wm +�(ε) +(s, x) +���� = +���� +ˆ +Rd +� +2aij +yj(s, y) − bi(s, y) +� +wm(s, y)ζε(x − y)dy +���� +≤ K +ˆ +Rd |wm(s, y)|ζε(x − y)dy += K(|wm(s, ·)|)(ε)(x) +(4.24) +and +���� +�� +aij +xixj − bi +xi + c +� +wm +�(ε) +(s, x) +���� ≤ K(|wm(s, ·)|)(ε)(x). +(4.25) + +22 +BEOMSEOK HAN +Thus, by applying H¨older’s inequality, (4.23), (4.24), and (4.25) to (4.20), we have +ˆ +Rd +� +(Lwm(s, ·))(ε)(x) + ¯bi � +|wm(s, ·) + v(s, ·)|2�(ε) +xi (x) +� +w(ε) +m (s, x) +���w(ε) +m (s, x) +��� +p−2 +dx +≤ N +ˆ +R +���w(ε) +m (s, x) +��� +p−2 �ˆ +R +|wm(s, x − εy)||y||ζy(y)|dy +�2 +dx +− p − 1 +4K +� +i +ˆ +R +���w(ε) +m (s, x) +��� +p−2 ���w(ε) +mxi(s, x) +��� +2 +dx ++ N +� +i +ˆ +Rd +� +(|wm(s, ·)|)(ε)(x) +�2 ���w(ε) +m (s, x) +��� +p−2 +dx ++ N +ˆ +Rd(|wm(s, ·)|)(ε)(x) +���w(ε) +m (s, x) +��� +p−1 +dx. +− (p − 1) +� +i +ˆ +Rd +¯bi(s, ¯xi) +� +(wm(s, ·) + v(s, ·))2�(ε) (x) +���w(ε) +m (s, x) +��� +p−2 +w(ε) +mxi(s, x)dx, +(4.26) +where N = N(K). Furthermore, note that +ˆ +Rd +���w(ε) +m (s, x) +��� +p +w(ε) +mxi(s, x)dx = 0 +for +s ≤ t. +(4.27) +Indeed, take a nonnegative smooth function φ ∈ C∞ +c (Rd) such that φ(x) = 1 on |x| < 1, +φ(x) = 0 on |x| > 2, and supx∈Rd |φ′(x)| ≤ 2. Then, integration by parts yields +ˆ +Rd +���w(ε) +m (s, x) +��� +p +w(ε) +mxi(s, x)φ(x/n)dx += −p +ˆ +Rd +���w(ε) +m (s, x) +��� +p +w(ε) +mxi(s, x)φ(x/n)dx − 1 +n +ˆ +Rd +���w(ε) +m (s, x) +��� +p +w(ε) +m (s, x)φ′(x/n)dx. +Thus, we have +lim sup +n→∞ +���� +ˆ +Rd +���w(ε) +m (s, x) +��� +p +w(ε) +mxi(s, x)φ(x/n)dx +���� ≤ lim sup +n→∞ +2 +n(p + 1) +ˆ +Rd +���w(ε) +m (s, x) +��� +p+1 +dx = 0 +(4.28) +and (4.28) yields (4.27). Then, from the last term of (4.26), by applying (4.27) and the +H¨older’s inequality, we have +����� +� +i +ˆ +Rd−1 +¯bi(s, ¯xi) +ˆ +R +� +|wm(t, ·) + v(t, ·)|2�(ε) +(x) +���w(ε) +m (t, x) +��� +p−2 +w(ε) +mxi(t, x)dxid¯xi +����� +≤ N +� +i +ˆ +Rd +���� +� +|wm(t, ·) + v(t, ·)|2�(ε) +(x) − +���w(ε) +m (t, x) +��� +2���� +���w(ε) +m (t, x) +��� +p−2 ���w(ε) +mxi(t, x) +��� dx +≤ N +ˆ +Rd +�� +|wm(t, ·) + v(t, ·)|2�(ε) +(x) − +���w(ε) +m (t, x) +��� +2�2 ���w(ε) +m (t, x) +��� +p−2 +dx ++ +1 +8KN +� +i +ˆ +Rd +���w(ε) +mxi(t, x) +��� +2 ���w(ε) +m (t, x) +��� +p−2 +dx, +(4.29) + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +23 +where N = N(K). Then, by applying (4.26) and (4.29) to (4.19), we have +ˆ +Rd +���w(ε) +m (t, x) +��� +p +dx +≤ NIα +t +ˆ +R +���w(ε) +m (s, x) +��� +p−2 �ˆ +R +|wm(s, x − εy)||y||ζy(y)|dy +�2 +dx ++ NIα +t +ˆ +Rd +���(|wm(s, ·)|)(ε)(x) +��� +2 ���w(ε) +m (s, x) +��� +p−2 ++ (|wm(s, ·)|)(ε)(x) +���w(ε) +m (s, x) +��� +p−1 +dx ++ NIα +t +ˆ +Rd +�� +|wm(t, ·) + v(t, ·)|2�(ε) +(x) − +���w(ε) +m (t, x) +��� +2�2 ���w(ε) +m (t, x) +��� +p−2 +dx, +where N = N(p, K). 
By letting ε ↓ 0, we have +∥wm(t, ·)∥p +Lp +≤ NIα +t +ˆ +R +|wm(·, x)|p dx + NIα +t +ˆ +Rd +� +|wm(·, x) + v(·, x)|2 − |wm(·, x)|2�2 +|wm(·, x)|p−2 dx +≤ NIα +t +ˆ +R +|wm(·, x)|p dx + NIα +t +ˆ +Rd |v(·, x)|2 |wm(·, x)|p + |v(·, x)|4 |wm(·, x)|p−2 dx +≤ N +� +1 + +sup +s≤t,x∈Rd |v(s, x)|2 +� +Iα +t ∥wm(·, ·)∥p +Lp + N sup +s≤t +∥v(s, ·)∥2p +L2p. +for all t ≤ τ m +m . Then, by Theorem 4.7, we obtain +∥wm(t, ·)∥p +Lp ≤ N sup +s≤t +∥v(s, ·)∥2p +L2p +� +1 + +∞ +� +k=0 +� +1 + sups≤t,x∈Rd |v(s, x)|2�k +Γ(kα) +(Γ(α)T α)k +kα +� +for all t ≤ τ m +m . The lemma is proved. +□ +Finally, we demonstrate that the global solution candidate does not explode in a finite +time. +Lemma 4.9. For any T < ∞, we have +lim +R→∞ P +�� +ω ∈ Ω : +sup +t≤T,x∈Rd |u(t, x)| > R +�� += 0. +Proof. Let v be the function introduced in Lemma 4.5. Define +τ 1(S) := inf +� +t ≥ 0 : ∥v(t, ·)∥Lp ≥ S +� +∧ T, +τ 2(S) := inf +� +t ≥ 0 : sup +x∈Rd |v(t, x)| ≥ S +� +∧ T. +and +τ 0 +m(S) := τ m +m ∧ τ 1(S) ∧ τ 2(S), + +24 +BEOMSEOK HAN +where τ m +m is the stopping time introduced in (4.9). Set r := +p +p−1. Then, by Lemmas 4.2 and +2.10, (viii), H¨older inequality, and Minkowski inequality, we have +∥um∥p +Hγ +p(τ 0m(S)) − N∥u0∥p +Uα,γ +p +≤ N +����� +� +i +¯bi(u2 +mρm(um))xi +����� +p +Hγ−2 +p +(τ 0m(S)) ++ N∥σ(um)η∥p +Hγ−2+c0 +p +(τ 0m(S),l2) +≤ N +��u2 +m +��p +Hγ−1 +p +(τ 0m(S)) + N∥σ(um)η∥p +Hγ−2+c0 +p +(τ 0m(S),l2) +≤ NE +ˆ τ 0 +m(S) +0 +ˆ +Rd +���� +ˆ +Rd R1−γ(y)|um(s, x − y)|2dy +���� +p +dxds ++ NE +ˆ τ 0 +m(S) +0 +ˆ +Rd +���� +ˆ +Rd |R−γ+2−c0(y)|2 |um(s, x − y)|2dy +���� +p/2 +dxds +≤ NE +ˆ τ 0 +m(S) +0 +ˆ +Rd +���� +ˆ +Rd |R1−γ(y)|r |um(s, x − y)|rdy +���� +p/r +dx +ˆ +Rd |um(s, x)|pdxds ++ NE +ˆ τ 0 +m(S) +0 +�ˆ +Rd |R−γ+2−c0(x)|2 dx +�p/2 ˆ +Rd |um(s, x)|pdxds +≤ N0E +ˆ τ 0 +m(S) +0 +� +1 + +ˆ +Rd |um(s, x)|pdx +� ˆ +Rd |um(s, x)|pdxds, +(4.30) +where N0 = N(α, β, γ, d, p, K, T) +��´ +Rd |R1−γ(x)|r dx +�p/r + +�´ +Rd |R−γ+2−c0(x)|2 dx +�p/2� +. Note +that N0 < ∞ due to r < +d +d+γ−1 and Remark 2.8. Then, by Lemma 4.8 and the definitions +of τ1(S) and τ2(S), +ˆ +Rd |um(t, x)|pdx +≤ N +ˆ +Rd |um(t, x) − v(t, x)|p + |v(t, x)|pdx +≤ N sup +s≤t +sup +x∈Rd |v(t, x)| sup +s≤t +∥v(s, ·)∥p +p +� +1 + +∞ +� +k=0 +� +1 + sups≤t,x∈Rd |v(t, x)|2�k +Γ(kα) +(Γ(α)T α)k +kα +� ++ +ˆ +Rd |v(t, x)|pdx +< N(p, S, K). +(4.31) +Therefore, by combining (4.30) and (4.31), we have +∥um∥p +Hγ +p(τ 0m(S)) ≤ N + N∥u0∥p +Uα,γ +p +, +(4.32) + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +25 +where N = N(α, β, γ, d, p, S, K, T). It should be noted that the right-hand side of (4.32) is +independent of m. Therefore, by Chebyshev’s inequality and Lemma 4.3, we have +P +� +sup +t≤τ 0m(S) +sup +x∈Rd |u(t, x)| > R +� +≤ 1 +RpE +sup +t≤τ 0m,x∈Rd |u(t, x)|p +≤ 1 +RpE +sup +t≤τ 0m,x∈Rd |um(t, x)|p +≤ 1 +Rp∥um∥p +Hγ +p(τ 0m) +≤ N +Rp, +where N = N(u0, α, β, γ, d, p, S, K, T). In contrast, by Lemma 4.5, +P +� +τ 1(S) < T +� ++ P +� +τ 2(S) < T +� +≤ P +� +sup +t≤T +∥v(t, ·)∥Lp > S +� ++ P +� +sup +t≤T +sup +x∈Rd |v(t, x)| > S +� +≤ 1 +Sp E sup +t≤T +∥v(t, ·)∥p +Lp + 1 +Sp E +sup +t≤T,x∈Rd |v(t, x)|p +≤ 1 +Sp N(u0, h, α, β, γ, d, p, K, T). +Thus, +P +� +sup +t≤T,x∈Rd |u(t, x)| > R +� +≤ lim inf +m→∞ P +� +sup +t≤τ 0m(S),x∈Rd |u(t, x)| > R +� ++ P +� +τ 1(S) < T +� ++ P +� +τ 2(S) < T +� +≤ N1 +Rp + N2 +Sp , +where N1 = N1(u0, α, β, γ, d, p, S, K, T) and N2 = N2(u0, h, α, β, γ, d, p, K, T). The lemma +is proved by letting R → ∞ and S → ∞ in order. +□ +Proof of Theorem 3.5. Step 1. (Uniqueness). 
+Suppose u, ¯u ∈ Hγ +p,loc are nonnegative +solutions of equation (3.1). By Definition 2.13, there are bounded stopping times τn (n = +1, 2, · · · ) such that +τn ↑ ∞ +and +u, ¯u ∈ Hγ +p(τn). +Fix n ∈ N. Note that u, ¯u ∈ C([0, τn]; C(Rd)) almost surely and +E sup +t≤τn +sup +x∈Rd |u(t, x)|p + E sup +t≤τn +sup +x∈Rd |¯u(t, x)|p < ∞. +(4.33) + +26 +BEOMSEOK HAN +Then, for m ∈ N, define +τ 1 +m,n := inf +� +t ≥ 0 : sup +x∈Rd |u(t, x)| > m +� +∧ τn, +τ 2 +m,n := inf +� +t ≥ 0 : sup +x∈Rd |¯u(t, x)| > m +� +∧ τn, +and +τm,n := τ 1 +m,n ∧ τ 2 +m,n. +(4.34) +Due to (4.33), τ 1 +m,n and τ 2 +m,n are well-defined stopping times; thus, τm,n is a stopping time. +Observe that u, ¯u ∈ Hγ +p(τm,n) and τm,n ↑ τn as m → ∞ almost surely. Fix m ∈ N. Notice +that u, ¯u ∈ Hγ +p(τm,n) are solutions to equation +∂α +t v = Lv + ¯bi � +v2ρm(v) +� +xi + ∂β +t +ˆ t +0 +σ(v)dWt, +0 < t ≤ τm,n; +v(0, ·) = u0, +where Lv = aijvxixj + bivxi + cv. By the uniqueness result in Lemma 4.3, we conclude that +u = ¯u in Hγ +p(τm,n) for each m ∈ N. The monotone convergence theorem yields u = ¯u in +Hγ +p(τn), which implies u = ¯u in Hγ +p,loc. +Step 2 (Existence.). Let T < ∞. For m ∈ N, define τ m +m and u as in Remark 4.4. Observe +that +P (τ m +m < T) ≤ P +� +sup +t≤T,x∈Rd |u(t, x)| ≥ m +� +. +Indeed, if τ m +m < T, then supt≤τ m +m ,x∈Rd |u(t, x)| = supt≤τ m +m ,x∈Rd |um(t, x)| = m almost surely. +Then, by Lemma 4.9, we have +lim sup +m→∞ P (τ m +m < T) ≤ lim sup +m→∞ P +� +sup +t≤T,x∈Rd |u(t, x)| ≥ m +� += 0 +Since T < ∞ is arbitrary, τ m +m → ∞ in probability. In addition, we conclude that τ m +m ↑ ∞ +almost surely, because τ m +m is increasing in m. +Last, set τm := τ m +m ∧ m. Note that (see Remark 4.4) +u(t, x) = um(t, x) +for +t ∈ [0, τm]. +Observe that supx∈Rd |um(t, x)| ≤ m for t ∈ [0, τm]; thus, um satisfies (3.1) almost every- +where t ∈ [0, τm] almost surely. Because u = um for t ∈ [0, τm] and um ∈ Hγ +p(τm), it follows +that u ∈ Hγ +p(τm) and u satisfies (3.1) for all t ≤ τm almost surely. We have u ∈ Hγ +p,loc +because τm ↑ ∞ as m → ∞ almost surely. The theorem is proved. +□ +Proof of Theorem 3.10. The proof of Theorem 3.10 is motivated by [26, Corollarly 5.11]. +Since q > p, by Theorem 3.5, there exists a unique solution ¯u ∈ Hγ +q,loc satisfying equation +(3.1). By Definition 2.13, there exists τn such that τn → ∞ almost surely as n → ∞, +u ∈ Hγ +p(τn) and ¯u ∈ Hγ +q(τn). Fix n ∈ N. Because 2+αd +αγ +< p < q, we can define τm,n (m ∈ N) +as in (4.34). Notice that for any p0 > p, we have +u ∈ Lp0(τm,n) + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +27 +since +E +ˆ τm,n +0 +ˆ +R +|u(t, x)|p0dxdt ≤ mp0−pE +ˆ τm,n +0 +ˆ +R +|u(t, x)|pdxdt < ∞. +Observe that ¯biuuxi ∈ Hγ−2 +q +(τm,n) ⊂ H−2 +q (τm,n). Indeed, similar to (4.6), +E +ˆ τm,n +0 +���� +1 +2 +¯bi(s, ·) +� +(u(s, ·))2� +xi +���� +q +Hγ−2 +q +ds ≤ NE +ˆ τm,n +0 +ˆ +R +|u(s, x)|2qdxds < ∞. +Additionally, we have +auxx ∈ H−2 +q (τm,n), +bux ∈ H−1 +q (τm,n), +and +cu ∈ Lq(τm,n). +Therefore, because Lq(τm,n) ⊂ H−1 +q (τm,n) ⊂ H−2 +q (τm,n), +aijuxixj + biuxi + cu + ¯biuuxi ∈ H−2 +q (τm,n). +(4.35) +Similar to (4.12), we have +∥σ(u)η∥q +Hγ−2+c0 +q +(τm,n,l2) ≤ N +ˆ τm,n +0 +∥h(t, ·)∥q +Lq dt < ∞. +(4.36) +Thus, we have +σ(u)η ∈ Hγ−2+c0 +q +(τm,n, l2) ⊂ H−2+c0 +q +(τm,n, l2). +(4.37) +Due to (4.35), (4.37), and Lemma 4.2, u is in Lq(τm,n) and u satisfies (3.1) for almost +everywhere t ≤ τm,n almost surely. 
On the other hand, since ¯biuuxi ∈ Hγ−2 +q +(τm,n) and +σ(u)η ∈ Hγ−2+c0 +q +(τm,n, l2), Lemma 4.2 implies that there exists v ∈ Hγ +q(τm,n) satisfying +∂α +t v = Lv + ¯biuuxi + ∂α +t +ˆ t +0 +σk(u)dWt, +0 < t ≤ τm,n ; +v(0, ·) = u0, +(4.38) +where Lv = aijvxixj + bivxi + cv. In (4.38), note that ¯biuuxi and σk(u) are used instead of +¯bivvxi and σk(v). Moreover, because u ∈ Lq(τm,n) satisfies equation (4.38), ¯v := u − v ∈ +Lq(τm,n) satisfies +∂α +t ¯v = aij¯vxixj + bi¯vxi + c¯v, +0 < t ≤ τm,n ; +¯v(0, ·) = 0. +By the deterministic version of Lemma 4.2, we have ¯v = 0 in Lq(τm,n); thus, u = v in +Lq(τm,n). Therefore, u is in Hγ +q(τm,n). As ¯u ∈ Hγ +q(τm,n) and ¯u satisfies equation (3.1), by +Lemma 4.3, we have u = ¯u in Hγ +q(τm,n). The theorem is proved. +□ +5. +Proof of Theorem 2.16 +This section provides a proof of the embedding theorem for solution spaces Hγ +p(τ). Con- +sider the following fractional diffusion equation +∂α +t u = ∆u +t > 0 ; +u(0, ·) = u0(·), +(5.1) +where α ∈ (0, 1) and u0(·) ∈ C∞ +c (Rd). It turns out that a fundamental solution p(t, x) exists +such that +p(t, ·) ∈ L1(Rd) +and +F(p(t, ·))(ξ) = Eα(−tα|ξ|2) +(e.g., [24, Theorem 2.1]), and the solution of (5.1) is given by +u(t, x) = (u0 ∗ p(t, ·))(x) = +ˆ +Rd u0(y)p(t, x − y)dy. + +28 +BEOMSEOK HAN +For convenience, define +qα,β(t, x) := +� +Iα−βp(t, x) +if +α ≥ β +Dβ−αp(t, x) +if +α < β. +We gather some facts related to p(t, x) and qα,β(t, x) (for more information, see [22, 23, 24]). +Lemma 5.1. Let d ∈ N, α ∈ (0, 1), β < α + 1/2, γ ∈ [0, 2), and σ ∈ R. +(i) For all t ̸= 0 and x ̸= 0, +∂α +t p(t, x) = ∆p(t, x) +and +∂tp(t, x) = ∆qα,1(t, x). +Additionally, for each x ̸= 0, +∂ +∂tp(t, x) → 0 as t ↓ 0. Moreover, +∂ +∂tp(t, x) is integrable +in Rd uniformly on t ∈ [δ, T] for any δ > 0. +(ii) There exist constants c = c(α, d) and N = N(α, d) such that if |x|2 ≥ tα, +|p(t, x)| ≤ N|x|−d exp +� +−c|x| +2 +2−α t− +α +2−α +� +. +(iii) Let n ∈ N. Then, there exists N = N(α, γ, n) such that +���Dσ +t Dn +x(−∆)γ/2qα,β(1, x) +��� ≤ N +� +|x|−d+2−γ−n ∧ |x|−d−γ−n� +. +(iv) The scaling properties hold. In other words, +qα,β(t, x) = t− αd +2 +α−βqα,β(1, xt− α +2 ), +(−∆)γ/2qα,β(t, x) = t− α(d+γ) +2 ++α−β(−∆)γ/2qα,β(1, xt− α +2 ). +Proof. To see (i), (ii), and (iii) follow from Theorems 2.1 and 2.3 of [23]. For (iv), see (5.2) +in [23]. +□ +Remark 5.2. To prove Theorem 2.16, we define the operators. +Let φ ∈ C∞ +c (Rd) and +f ∈ C∞ +c ((0, ∞) × Rd). Take a function g = (g1, g2, . . . ) satisfying the form +gk(t, x) = +��n +i=1 1 |(τi−1,τi]](t)gik(x) +for +k = 1, 2, . . . , n, +0 +for +k = n + 1, . . . +(5.2) +for some n ∈ N, where τi is the bounded stopping time and gik ∈ C∞ +c (Rd). Further, we set +T 1 +t φ(x) := +ˆ +Rd p(t, x − y)φ(y)dy, +(5.3) +T 2 +t f(t, x) := +ˆ t +0 +ˆ +Rd qα,1(t − r, x − y)f(r, y)dyds, +(5.4) +T 3 +t g(t, x) := +� +k +ˆ t +0 +ˆ +Rd qα,β(t − r, x − y)gk(r, y)dydwk +s . +(5.5) +It is well-known that T 1 +t φ, T 2 +t f, and T 3 +t g are solutions to +∂α +t u1 = ∆u1; +u1(0, ·) = φ, +∂α +t u2 = ∆u2 + f; +u2(0, ·) = 0, +∂α +t u3 = ∆u3 + ∂β +t +ˆ t +0 +gkdwk +s; +u3(0, ·) = 0, + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +29 +respectively (for more information, see [22, 23, 24]). +First, we provide a smoothing effect of T 1 +t , which implies the continuity of T 1 +t φ in t. +Lemma 5.3. Let τ ≤ T be a bounded stopping time and α ∈ (0, 1). If p ∈ (1, ∞), θ ∈ [0, α), +φ ∈ C∞ +c (Rd), and t ∈ (0, T), we have +e−t ��T 1 +t φ +�� +H +2θ +α +p +≤ Nt−θ∥φ∥Lp +(5.6) +where N = N(α, θ, d, p, T). +Proof. 
In the case of θ = 0, by Mink¨owski’s inequality, we have +∥T 1 +t φ∥Lp ≤ ∥p(t, ·) ∗ φ∥Lp ≤ ∥p(t, ·)∥L1∥φ∥Lp ≤ ∥φ∥Lp. +(5.7) +Thus, we have (5.6). For θ ∈ (0, α), observe that +∥e−tT 1 +t φ∥H2θ/α +p += ∥(1 − ∆)θ/α(e−tT 1 +t φ)∥Lp ≤ ∥e−tT 1 +t φ∥Lp + ∥(−∆)θ/α(e−tT 1 +t φ)∥Lp, +where the last inequality follows from Lemma 2.10 (v). As e−t ≤ (Nt−θ ∧ 1), we have +��e−tT 1 +t φ +�� +H2θ/α +p +≤ N +� +t−θ ��T 1 +t φ +�� +Lp + +���(−∆)θ/αT 1 +t φ +��� +Lp +� +. +(5.8) +By inequality (5.7), we have +t−θ∥T 1 +t φ∥Lp ≤ t−θ∥φ∥Lp. +(5.9) +On the other hand, Minkowski’s inequality yields +∥(−∆)θ/αT 1 +t h∥Lp = ∥(−∆)θ/α(p(t, ·) ∗ φ)∥Lp ≤ ∥((−∆)θ/αp)(t, ·)∥L1∥φ∥Lp. +(5.10) +Additionally Lemma 5.1 (iv), (ii), and (iii) imply +∥((−∆)θ/αp)(t, ·)∥L1 = +ˆ +Rd |((−∆)θ/αp)(t, x)|dx +≤ t−θ +ˆ +Rd |((−∆)θ/αp)(1, x)|dx +≤ N(α, θ, d, p)t−θ. +(5.11) +Then, by applying (5.11) to (5.10), we have +∥(−∆)θ/αT 1 +t φ∥Lp ≤ Nt−θ∥φ∥Lp. +(5.12) +Thus, by plugging in (5.9) and (5.12) into (5.8), we have (5.6). The lemma is proved. +□ +To proceed further, we introduce one of the embedding theorems for Slobodetskii’s spaces. +Lemma 5.4. If µp > 1 and p ≥ 1, for any continuous Lp-valued function φ and γ ≤ ρ, we +have the following: +∥φ(ρ) − φ(γ)∥p +Lp ≤ N(ρ − γ)µp−1 +ˆ ρ +γ +ˆ ρ +γ +1t>s +∥φ(t) − φ(s)∥p +Lp +|t − s|1+µp +dsdt +�0 +0 := 0 +� +, +(5.13) +where N = N(µ, p). In particular, +E +sup +0≤ss +E ∥φ(t) − φ(s)∥p +Lp +|t − s|1+µp +dsdt. +(5.14) + +30 +BEOMSEOK HAN +With the help of Lemma 5.4, we obtain the continuity of T 2 +t f and T 3 +t g on t ∈ [0, T], and +the H¨older continuity of T 1 +t φ, T 2 +t f, and T 3 +t g on [δ, T]. +First, we suggest the H¨older continuity of T 1 +t φ in t. +Lemma 5.5. Let T < ∞, δ > 0, and α ∈ (0, 1). If p ∈ (1, ∞) and µ ∈ (0, 1) satisfy +1 +αp < µ < 1 +α, +and φ ∈ C∞ +c (Rd), then +sup +δ≤ss +��T 1 +t+δφ − T 1 +s+δφ +��p +Lp +|t − s|1+αµp +dsdt. +By Minkowski’s inequality, +��T 1 +t+δφ − T 1 +s+δφ +�� +Lp ≤ +ˆ +Rd |p(t + δ, y) − p(s + δ, y)| dy ∥φ∥Lp . +Then, by the fundamental theorem of calculus, the change of variable, and Lemma 5.1 (i) +- (iii), +ˆ T−δ +0 +ˆ T−δ +0 +1t>s|t − s|−1−αµp ��T 1 +t+δφ − T 1 +s+δφ +��p +Lp dsdt +≤ +ˆ T−δ +0 +ˆ T−δ +0 +1t>s|t − s|−1−αµp +�ˆ +Rd |p(t + δ, y) − p(s + δ, y)| dy ∥φ∥Lp +�p +dsdt += +ˆ T−δ +0 +t−1−αµp +ˆ T−t +δ +�ˆ +Rd |p(t + s, y) − p(s, y)| dy +�p +dsdt ∥φ∥p +Lp +≤ +ˆ T−δ +0 +t−1−αµp+p +ˆ T−t +δ +�ˆ +Rd sup +r∈[δ,T] +|∂tp(r, y)| dy +�p +dsdt ∥φ∥p +Lp +≤ N ∥φ∥p +Lp . +(5.16) +Thus, we have (5.15). The lemma is proved. +□ +Remark 5.6. It should be remarked that we assume δ > 0 in Lemma 5.5, and it is a +sufficient condition. Indeed, if we try δ = 0, the term +ˆ T +0 +t−1−αµp +ˆ T−t +0 +�ˆ +Rd |p(t + s, y) − p(s, y)| dy +�p +dsdt +in (5.16) blows up. +Next we introduce the continuities of T 2 +t f and T 3 +t g. + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +31 +Lemma 5.7. Let α ∈ (0, 1). +(i) If p ∈ (1, ∞), θ ∈ (0, α), and µ ∈ (0, 1) satisfy +p > +1 +α − θ, +µ ∈ +� 1 +αp, α − θ +α +� +and f ∈ Lp((0, T) × Rd), then for t, s ∈ (0, T), +��T 2 +t f − T 2 +s f +��p +H2θ/α +p +≤ N|t − s|αµp−1 +ˆ T +0 +∥f(r, ·)∥p +Lp dr +(5.17) +where N = N(α, θ, d, p, T). Additionally, +sup +0≤s +1 +(α − β − θ) ∧ 1/2 + 1/2, +µ ∈ +� 1 +αp, (α − β − θ) ∧ 1/2 + 1/2 +α +� +, +and g ∈ Lp(τ, l2), then, for t, s ∈ (0, T), we have +E +��T 3 +t∧τg − T 3 +s∧τg +��p +H2θ/α +p +(l2) ≤ N|t − s|αµp−1∥g∥p +Lp(τ,l2), +(5.19) +where N = N(α, β, θ, d, p, T). Additionally, +E +sup +0≤s 0 and γ > 0. 
Notice that (5.13) yields +��T 2 +ρ f − T 2 +γ f +��p +H2θ/α +p +≤ N|ρ − γ|αµp−1 +ˆ T +0 +ˆ T +0 +1t>s +��T 2 +t f − T 2 +s f +��p +H2θ/α +p +|t − s|1+αµp +dsdt. +(5.21) +Then, by definition of T 2 +t f (see (5.4)), +��T 2 +t f − T 2 +s f +�� +H2θ/α +p +≤ +���� +ˆ t +s +ˆ +Rd qα,1(t − r, y)f(r, · − y)dydr +���� +H2θ/α +p ++ +���� +ˆ s +0 +ˆ +Rd (qα,1(t − r, y) − qα,1(s − r, y)) f(r, · − y)dydr +���� +H2θ/α +p +. +(5.22) +Set +I1 := +ˆ T +0 +ˆ T +0 +1t>s +��� +´ t +s +´ +Rd qα,1(t − r, y)f(r, · − y)dydr +��� +p +H2θ/α +p +|t − s|1+αµp +dsdt, +I2 := +ˆ T +0 +ˆ T +0 +1t>s +��´ s +0 +´ +Rd (qα,1(t − r, y) − qα,1(s − r, y)) f(r, · − y)dydr +��p +H2θ/α +p +|t − s|1+αµp +dsdt. +(5.23) + +32 +BEOMSEOK HAN +Then, apply (5.22) and (5.23) to (5.21), +��T 2 +t f − T 2 +s f +��p +H2θ/α +p +≤ |t − s|αµp−1(I1 + I2). +(5.24) +To deal with I1, we employ Minkowski’s inequality, the change of variable, and Lemma 2.10 +(v). Then, +I1 ≤ +ˆ T +0 +t−1−αµp +�ˆ t +0 +ˆ +Rd +��� +� +(1 − ∆)θ/αqα,1 +� +(r, y) +��� dydr +�p +dt +ˆ T +0 +∥f(s, ·)∥p +Lp ds +≤ I11 + I12, +(5.25) +where +I11 := +ˆ T +0 +t−1−αµp +�ˆ t +0 +ˆ +Rd |qα,1(r, y)| dydr +�p +dt +ˆ T +0 +∥f(s, ·)∥p +Lp ds, +I12 := +ˆ T +0 +t−1−αµp +�ˆ t +0 +ˆ +Rd +��� +� +∆θ/αqα,1 +� +(r, y) +��� dydr +�p +dt +ˆ T +0 +∥f(s, ·)∥p +Lp ds. +Because µ < 1, Lemma 5.1 (iv) and (iii) yield +I11 = +ˆ T +0 +t−1−αµp+αpdt +�ˆ +Rd |qα,1(1, y)| dy +�p ˆ T +0 +∥f(s, ·)∥p +Lp ds +≤ N +ˆ T +0 +∥f(s, ·)∥p +Lp ds. +(5.26) +Similarly, since µ < 1 − θ/α, +I12 = +ˆ T +0 +t−1−αµp+αp−θpdt +�ˆ +Rd +��� +� +∆θ/αqα,1 +� +(1, y) +��� dy +�p ˆ T +0 +∥f(s, ·)∥p +Lp ds +≤ N +ˆ T +0 +∥f(s, ·)∥p +Lp ds. +(5.27) +Thus, by applying (5.26) and (5.27) to (5.25), we have +I1 ≤ N +ˆ T +0 +∥f(s, ·)∥p +Lpds. +(5.28) +Next, we address I2. Similar to the case for I1, we have +I2 ≤ +ˆ T +0 +´ T−t +0 +�´ s +0 +´ +Rd +��(1 − ∆)θ/α (qα,1(t + r, y) − qα,1(r, y)) +�� dy∥f(s − r, ·)∥Lpdr +�p ds +t1+αµp +dt +≤ I21 + I22, +(5.29) +where +I21 := +ˆ T +0 +´ T−t +0 +�´ s +0 +´ +Rd |qα,1(t + r, y) − qα,1(r, y)| dy∥f(s − r, ·)∥Lpdr +�p ds +t1+αµp +dt, +I22 := +ˆ T +0 +´ T−t +0 +�´ s +0 +´ +Rd +��∆θ/α (qα,1(t + r, y) − qα,1(r, y)) +�� dy∥f(s − r, ·)∥Lpdr +�p ds +t1+αµp +dt. + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +33 +Since µ < 1, by Minkowski’s inequality and the fundamental theorem of calculus, we have +I21 ≤ +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +ˆ +Rd |qα,1(t + r, y) − qα,1(r, y)| dydr +�p +dt +ˆ T +0 +∥f(s, ·)∥p +Lp(Rd)ds +≤ +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +ˆ t+r +r +ˆ +Rd |qα,2(s, y)| dydsdr +�p +dt +ˆ T +0 +∥f(s, ·)∥p +Lp(Rd)ds +≤ N +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +rα−1 − (t + r)α−1dr +�p +dt +ˆ T +0 +∥f(s, ·)∥p +Lp(Rd)ds +≤ N +ˆ T +0 +t−1−αµp+αpdt +ˆ T +0 +∥f(s, ·)∥p +Lp(Rd)ds +≤ N +ˆ T +0 +∥f(s, ·)∥p +Lp(Rd)ds. +(5.30) +Additionally, since µ < 1 − θ/α, +I22 ≤ +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +ˆ +Rd +ˆ t+r +r +���(∆θ/αqα,2(s, y) +��� dsdydr +�p +dt +ˆ T +0 +∥f(s, ·)∥p +Lpds +≤ N +ˆ T +0 +t−1−αµp+αp−θpdt +ˆ T +0 +∥f(s, ·)∥p +Lpds +≤ N +ˆ T +0 +∥f(s, ·)∥p +Lpds. +(5.31) +Therefore, by employing (5.30) and (5.31) to (5.29), we have +I2 ≤ N +ˆ T +0 +∥f(s, ·)∥p +Lpds, +(5.32) +and thus by combining (5.28) and (5.32) to (5.24), we have (5.17). +To obtain (5.18), employ (5.14) instead of (5.13) and repeat the proof word for word. +Proof of (ii) By (5.13), we have +E +��T 3 +ρ g − T 3 +γ g +��p +H2θ/α +p +(l2) ≤ N|ρ − γ|αµp−1 +ˆ T +0 +ˆ T +0 +1t>s +E +��T 3 +t g − T 3 +s g +��p +H2θ/α +p +(l2) +|t − s|1+αµp +dsdt. 
+ +34 +BEOMSEOK HAN +Notice that the Burkholder-Davis-Gundy and Minkowski’s inequalities imply that +E +��T 3 +t g − T 3 +s g +��p +H2θ/α +p +(l2) +≤ N +ˆ +Rd E +�� +k +���� +ˆ t +s +ˆ +Rd((1 − ∆)θ/α qα,β)(t − r, y)gk(r, x − y)dydwk +r +���� +2�p/2 ++ +�� +k +���� +ˆ s +0 +ˆ +Rd +� +(1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y)) +� +gk(r, x − y)dydwk +r +���� +2�p/2 +dx +≤ N +ˆ +Rd E +�ˆ t−s +0 +�ˆ +Rd +���((1 − ∆)θ/α qα,β)(t − s − r, y) +��� |g(s + r, x − y)|l2dy +�2 +dr +�p/2 ++ +�ˆ s +0 +�ˆ +Rd +���(1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y)) +��� |g(r, x − y)|l2dy +�2 +dr +�p/2 +dx +≤ NE +�ˆ t−s +0 +�ˆ +Rd +���((1 − ∆)θ/α qα,β)(t − s − r, y) +��� dy +�2 +∥g(s + r, ·)∥2 +Lp(l2)dr +�p/2 ++ NE +�ˆ s +0 +�ˆ +Rd +���(1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y)) +��� dy +�2 +∥g(r, ·)∥2 +Lp(l2)dr +�p/2 +. +Then, set +I3 := +ˆ T +0 +ˆ T +0 +1t>s|t − s|−1−αµpE +�ˆ t−s +0 +A(t, s, r)∥g(s + r, ·)∥2 +Lp(l2)dr +�p/2 +dsdt, +I4 := +ˆ T +0 +ˆ T +0 +1t>s|t − s|−1−αµpE +�ˆ s +0 +B(t, s, r)∥g(r, ·)∥2 +Lp(l2)dr +�p/2 +dsdt, +where +A(t, s, r) = +�ˆ +Rd | (1 − ∆)θ/α qα,β(t − s − r, y)|dy +�2 +, +B(t, s, r) = +�ˆ +Rd +���(1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y)) +��� dy +�2 +. +Note that Minkowski’s inequality and Lemma 2.10 (v) imply that +I3 ≤ +ˆ T +0 +t−1−αµp +�ˆ t +0 +�ˆ +Rd +���((1 − ∆)θ/α qα,β)(t − r, y) +��� dy +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2) +≤ I31 + I32, +(5.33) +where +I31 := +ˆ T +0 +t−1−αµp +�ˆ t +0 +�ˆ +Rd |qα,β(r, y)|dy +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2), +I32 := +ˆ T +0 +t−1−αµp +�ˆ t +0 +�ˆ +Rd +��� +� +∆θ/αqα,β +� +(r, y) +��� dy +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2). + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +35 +Since 1 +α +� +α − β + 1 +2 +� +> µ, by Lemma 5.1 (iii), we have +I31 = +ˆ T +0 +t−1−αµp +�ˆ t +0 +�ˆ +Rd |qα,β(r, y)|dy +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2) += +ˆ T +0 +t−1−αµp +�ˆ t +0 +r2(α−β)dr +�p/2 +dt +�ˆ +Rd |qα,β(1, y)|dy +�p +∥g∥p +Lp(T,l2) += +ˆ T +0 +t−1+(α−β+1/2−αµ)pdt +�ˆ +Rd |qα,β(1, y)|dy +�p +∥g∥p +Lp(T,l2) +≤ N∥g∥p +Lp(T,l2). +(5.34) +Similarly, as 1 +α +� +α − β − θ + 1 +2 +� +> µ, +I32 ≤ +ˆ T +0 +t−1+(−αµ+α−β−θ+1/2)pdt +�ˆ +Rd |(∆θ/αqα,β)(1, y)|dy +�p +∥g∥p +Lp(T,l2) +≤ N∥g∥p +Lp(T,l2). +(5.35) +Therefore, by employing (5.34) and (5.35) to (5.33), we have +I3 ≤ N∥g∥p +Lp(T,l2). +(5.36) +In the case of I4, by Minkowski’s inequality and Lemma 2.10 (v), we have +I4 ≤ I41 + I42. +Further, +I41 := +ˆ T +0 +t−1−αµpE +ˆ T−t +0 +�ˆ s +0 +�ˆ +Rd |C(t, r, y)|dy +�2 +∥g(s − r, ·)∥2 +Lp(l2)dr +�p/2 +dsdt, +I42 := +ˆ T +0 +t−1−αµpE +ˆ T−t +0 +�ˆ s +0 +�ˆ +Rd |∆θ/αC(t, r, y)|dy +�2 +∥g(s − r, ·)∥2 +Lp(l2)dr +�p/2 +dsdt, +where +C(t, r, y) = qα,β(t + r, y) − qα,β(r, y). +We address I41. +By Minkowski’s inequality, the fundamental theorem of calculus, and +Lemma 5.1, we have +I41 ≤ N +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +�ˆ +Rd |qα,β(t + r, y) − qα,β(r, y)|dy +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2) +≤ N +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +�ˆ t+r +r +ˆ +Rd |qα,β+1(s, y)|dyds +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2) +≤ NH1∥g∥p +Lp(T,l2), +(5.37) +where +H1 := +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +�ˆ t+r +r +sα−β−1ds +�2 +dr +�p/2 +dt +(5.38) + +36 +BEOMSEOK HAN +and N = N(α, β, d, p, T). Next, we claim that +H1 < ∞ +(5.39) +To demonstrate (5.39), set +χ(t) := +ˆ T−t +0 +�ˆ t+r +r +sα−β−1ds +�2 +dr. +Furthermore, +H1 = +ˆ T +0 +t−1−αµp(χ(t))p/2dt +(5.40) +Depending on the range of α − β, we consider the following five cases. +(Case 1.) −1/2 < α − β < 0 +For t ∈ (0, T), we have +0 ≤ χ(t) ≤ N +ˆ T−t +0 +r2(α−β) − (t + r)2(α−β)dr ≤ Nt2(α−β)+1, +(5.41) +where N = N(α, β). 
Then, since 1 +α(α − β + 1/2) > µ, by combining (5.40) and (5.41), +H1 ≤ N(α, β, p) +ˆ T +0 +t−1+p(−αµ+α−β+1/2)dt < ∞. +(Case 2.) α − β = 0 +Notice that +χ(t) = +ˆ T−t +0 +�ˆ t+r +r +s−1ds +�2 +dr = +ˆ T−t +0 +� +log +�t + r +r +��2 +dr. +Obviously, χ(0) = 0. Note that +χ′(t) ≤ 2 +ˆ ∞ +0 +1 +1 + r log +�1 + r +r +� +dr = 2 +ˆ ∞ +0 +x(ex − 1)−1dx = π2/3 +on t ∈ (0, T/2). Thus, +0 ≤ χ(t) = χ(t) − χ(0) = +ˆ t +0 +χ′(s)ds ≤ π2 +3 t +(5.42) +on t ∈ (0, T/2). Additionally, χ(t) ≤ N on t ∈ (T/2, T). Therefore, +H1 ≤ N +ˆ T/2 +0 +t−1+(−αµ+1/2)pdt + N < ∞, +where N = N(α, β, p). +(Case 3.) 0 < α − β < 1/2 +Observe that χ is twice continuously differentiable, and +χ′(t) = (α − β)−2 � +−T 2(α−β) − (T − t)2(α−β) + 2T α−β(T − t)α−β� ++ 2(α − β)−1 +ˆ T−t +0 +(t + r)2(α−β)−1 − (t + r)α−β−1rα−βdr, + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +37 +and +χ′′(t) = 2(α − β)−1(T − t)2(α−β)−1 − 2(α − β)−1T α−β(T − t)α−β−1 ++ 2(α − β)−1T α−β−1(T − t)α−β − 2(α − β)−1t2(α−β)−1 +− 2(α − β)−1(α − β − 1) +ˆ T−t +0 +(t + r)α−β−2rα−βdr. +(5.43) +In addition, χ(0) = χ′(0) = 0. Then, by using the fundamental theorem of calculus and +α − β ∈ (0, 1/2), we obtain +χ(t) = +ˆ t +0 +ˆ s +0 +χ′′(ρ)dρds +≤ 2(α − β)−1 +ˆ t +0 +ˆ s +0 +(T − ρ)2(α−β)−1dρds +− 2(α − β)−1(α − β − 1) +ˆ t +0 +ˆ s +0 +ˆ T−ρ +0 +(ρ + r)α−β−2rα−βdrdρds +≤ N +ˆ t +0 +� +T 2(α−β) − (T − s)2(α−β)� +ds + N +ˆ t +0 +ˆ s +0 +ˆ T−ρ +0 +(ρ + r)2(α−β)−2drdρds +≤ N +ˆ t +0 +s2(α−β)ds + N +ˆ t +0 +ˆ s +0 +T 2(α−β)−1drdρds +≤ Nt2(α−β)+1, +(5.44) +where N = N(α, β, T). Thus, +H1 ≤ N(α, β, p, T) +ˆ T +0 +t−1+p(−αµ+α−β+1/2)dt < ∞. +(Case 4.) α − β = 1/2 +Because χ(0) = χ′(0) = 0, by the fundamental theorem of calculus, we have +χ(t) = +ˆ t +0 +ˆ s +0 +χ′′(ρ)dρds +≤ N +ˆ t +0 +ˆ s +0 +(T − ρ)1/2 + N +ˆ T−ρ +0 +(ρ + r)−3/2r1/2drdρds +≤ Nt2(1 + | log t|), +where N = N(T). Therefore, +H1 ≤ N(p, T) +ˆ T +0 +t−1+p(−αµ+1)(1 + | log t|)p/2dt < ∞. +(Case 5.) α − β > 1/2 + +38 +BEOMSEOK HAN +Similar to before, χ(0) = χ′(0) = 0. Additionally, as in (5.44), we have +χ(t) = +ˆ t +0 +ˆ s +0 +χ′′(r)drds +≤ N +ˆ t +0 +� +T 2(α−β) − (T − s)2(α−β)� +ds + N +ˆ t +0 +ˆ s +0 +ˆ T−ρ +0 +(ρ + r)2(α−β)−2drdρds +≤ N +ˆ t +0 +sds + N +ˆ t +0 +ˆ s +0 +dρds +≤ Nt2, +where N = N(α, β, T). Therefore, +H1 ≤ N(α, β, p, T) +ˆ T +0 +t−1+p(−αµ+1)dt < ∞. +Thus, we have (5.39). Then, by combining (5.39) and (5.37), we have I41 ≤ N∥g∥p +Lp(T,l2). +Next, we deal with I42. Minkowski’s inequality, the fundamental theorem of calculus, +and Lemma 5.1 yield +I42 ≤ N +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +�ˆ +Rd |∆θ/αqα,β(t + r, y) − ∆θ/αqα,β(r, y)|dy +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2) +≤ N +ˆ T +0 +t−1−αµp +�ˆ T−t +0 +�ˆ t+r +r +ˆ +Rd |∂s∆θ/αqα,β(s, y)|dyds +�2 +dr +�p/2 +dt∥g∥p +Lp(T,l2) +≤ NH2∥g∥p +Lp(T,l2), +where N = N(α, β, d, p, T) and H2 := +´ T +0 t−1−αµp +�´ T−t +0 +�´ t+r +r +sα−β−θ−1ds +�2 +dr +�p/2 +dt. +Similar to the case of H1 (see (5.38) and (5.39)), we demonstrate that +H2 < ∞ +by considering five cases for α − β − θ instead of α − β. Then, we have I42 ≤ N∥g∥p +Lp(T,l2). +The lemma is proved. +□ +Proof of Theorem 2.16. It suffices to show that the assertion holds for τ = T. Indeed, +assume the results holds for Hγ +p(T), and let τ ≤ T be a bounded stopping time and u ∈ +Hγ +p(τ). Then, by Definition 2.4 for ε > 0, there exists (f, g) ∈ Hγ−2 +p +(τ)×Hγ−2+c0 +p +(τ, l2) such +that +∂α +t u = f + ∂β +t +ˆ t +0 +gkdwk +t ; +u(0, ·) = u0(·) +and +∥u∥Hγ +p(τ) + ∥u0∥Uα,γ +p ++ ∥f∥Hγ−2 +p +(τ) + ∥g∥Hγ−2+c0 +p +(τ,l2) ≤ ∥u∥Hγ +p(τ) + ε. 
+Set ¯f := (f − ∆u)1t≤τ and ¯g := g1t≤τ ; thus, u satisfies +∂α +t u = ∆u + ¯f + ∂β +t +ˆ t +0 +¯gkdwk +t , +0 < t ≤ τ ; +u(0, ·) = u0(·). +(5.45) + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +39 +In contrast, by [25, Theorem 2.18], there exists v ∈ Hγ +p(T) such that v satisfies +∂α +t v = ∆v + ¯f + ∂β +t +ˆ t +0 +¯gkdwk +t , +0 < t ≤ τ ; +v(0, ·) = u0(·). +Additionally, +∥v∥Hγ +p (T) ≤ N +��� ¯f +�� +Hγ−2 +p +(T) + ∥¯g∥Hγ−2+c0 +p +(T,l2) + ∥u0∥Uα,γ +p +� +≤ N +� +∥u∥Hγ +p(τ) + ∥u0∥Uα,γ +p ++ ∥f∥Hγ−2 +p +(τ) + ∥g∥Hγ−2+c0 +p +(τ,l2) +� +≤ N∥u∥Hγ +p(τ) + Nε, +(5.46) +where N is independent of ε. Because v ∈ Hγ +p(T), v ∈ C +� +[0, T]; Hγ−2ν +p +� +almost surely and +E sup +t≤T +∥v(t, ·)∥p +Hγ−2ν +p +≤ N∥v∥p +Hγ +p(T) +(5.47) +by the hypothesis. Therefore, due to τ ≤ T, (5.46) and (5.47) yield +E sup +t≤τ +∥v(t, ·)∥p +Hγ−2ν +p +≤ E sup +t≤T +∥v(t, ·)∥p +Hγ−2ν +p +≤ N∥v∥p +Hγ +p(T) ≤ N∥u∥p +Hγ +p(τ) + Nε, +(5.48) +where N is independent of ε. Note that ¯u := u − v satisfies +∂α +t ¯u = ∆¯u, +0 < t ≤ τ; +¯u(0, ·) = 0. +Then, by the deterministic version of [25, Theorem 2.18], we have u(t, ·) = v(t, ·) for almost +every (ω, t) ∈ Ω × [0, τ]. Thus, v is an Hγ−2ν +p +-valued continuous version of u. Additionally, +from (5.48) +E sup +t≤τ +∥u(t, ·)∥p +Hγ−2ν +p +≤ N∥u∥p +Hγ +p(τ) + Nε. +In addition, as ε > 0 is arbitrary and N is independent of ε, we have (2.8). Furthermore, +observe that we have (2.10) similarly. Additionally, notice that (5.46) allows us to prove +the assertions with ∥f∥Hγ−2 +p +(T) + ∥g∥Hγ−2+c0 +p +(T,l2) + ∥u0∥Uα,γ +p +instead of ∥u∥Hγ +p(T). +Due to Lemma 2.10 (vi), we only consider the case γ = 2ν, where ν ∈ (0, 1) satisfies +(2.7). Moreover, by using the approximation to the identity, we may assume that u0 is +infinitely differentiable and compactly supported in x. Furthermore, we also assume that +f and g = (g1, g2, . . . ) denotes the function of the form satisfying (5.2) (see [26, Theorem +3.10]). Additionally, it should be remarked that u can be written as +u(t, x) = T 1 +t u0 + T 2 +t f + T 3 +t g +since u satisfies +∂α +t u = ∆u + f + ∂β +t +ˆ t +0 +gkdwk +t ; +u(0, ·) = u0(·) +for almost every (ω, t) ∈ |(0, τ]] (see [22, 23, 24]). + +40 +BEOMSEOK HAN +First, we prove (i). +To obtain the continuity of u, notice that T 1 +t u0 is a continuous +Lp-valued function in t on [0, T]. Indeed, by Remark 5.2 and Lemma 5.3, +∥T 1 +t u0 − u0∥Lp = N +���� +ˆ t +0 +(t − s)α−1T 1 +s ∆u0ds +���� +Lp +≤ N +ˆ t +0 +(t − s)α−1∥T 1 +s ∆u0∥Lpds +≤ N +ˆ t +0 +(t − s)α−1∥∆u0∥Lpds +≤ Ntα∥∆u0∥Lp +for t > 0. Then, we have +lim +t↓0 ∥T 1 +t u0 − u0∥Lp → 0. +(5.49) +Additionally, for t, h > 0, Lemma 5.3 applies +��T 1 +t+hu0 − T 1 +t u0 +�� +Lp ≤ +��� +T 1 +t+hu0 − u0 +� +− +� +T 1 +t u0 − u0 +��� +Lp +≤ +���� +ˆ t+h +0 +(t + h − s)α−1∆T 1 +s u0ds − +ˆ t +0 +(t − s)α−1∆T 1 +s u0ds +���� +≤ +���� +ˆ t+h +t +(t + h − s)α−1∆T 1 +s u0ds +���� +Lp ++ +���� +ˆ t +0 +(t + h − s)α−1∆T 1 +s u0ds − +ˆ t +0 +(t − s)α−1∆T 1 +s u0ds +���� +Lp +≤ +ˆ t+h +t +(t + h − s)α−1 ��∆T 1 +s u0 +�� +Lp ds ++ +ˆ t +0 +� +(t − s)α−1 − (t + h − s)α−1� ��∆T 1 +s u0 +�� +Lp ds +≤ α−1 (2hα + tα − (t + h)α) ∥∆u0∥Lp, +and thus +lim +h→0 +��T 1 +t+hu0 − T 1 +t u0 +�� +Lp = 0. +(5.50) +Because C∞ +c (Rd) is dense in Lp, (5.49) and (5.50) imply that T 1 +t is continuous on Lp. +For T 2 +t f and T 3 +t g, by combining (5.17), (5.19), Jensen’s inequality, and the Kolmogorov +continuity theorem (e.g., [28, Theorem 1.4.8]), we have T 2 +t f ∈ C([0, T]; Hγ−2ν +p +) and T 3 +t g ∈ +C([0, T]; Hγ−2ν +p +(l2)) almost surely. 
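For completeness, we recall the form of the Kolmogorov continuity criterion invoked here; this is the standard statement, and the formulation in [28, Theorem 1.4.8] cited above may differ in inessential details.

```latex
% Kolmogorov continuity criterion (standard form, recalled for reference):
% if X = (X_t)_{t \in [0,T]} takes values in a Banach space (E, \|\cdot\|_E) and there exist
% p \ge 1, \beta > 0, and C > 0 such that
\mathbb{E}\,\| X_t - X_s \|_E^{\,p} \;\le\; C\,|t-s|^{1+\beta}
\qquad \text{for all } s, t \in [0,T],
% then X admits a modification whose paths are \gamma-H\"older continuous for every
% \gamma < \beta/p; in particular, this modification is continuous on [0,T].
```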
+To demonstrate (2.8), we recall that ν is taken as in (2.7). Then, Lemma 5.3 implies that +E sup +t≤T +��T 1 +t u0 +��p +Hγ−2ν +p +≤ NE∥u0∥p +Lp ≤ NE∥u0∥p +H +γ− 2 +αp +p +. +(5.51) +Additionally, as (2.7) is assumed, Lemma 2.10 (vi) and (5.17) with θ = α − αν, and +lims↓0 ∥T 2 +s f∥Hγ−2ν +p += 0 yield +E sup +t≤T +��T 2 +t f +��p +Hγ−2ν +p +≤ N∥f∥p +Hγ−2 +p +(T). +(5.52) + +STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE +41 +Furthermore, due to (2.7), Lemma 2.10 (vi), (5.19) with θ = α(1 − c0/2) − αν, and +lims↓0 ∥T 3 +s g∥Hγ−2ν +p += 0, we have +E sup +t≤T +��T 3 +t g +��p +Hγ−2ν +p +≤ N∥g∥p +Hγ−2+c0 +p +(T,l2), +(5.53) +where c0 is the constant introduced in (2.3). By combining (5.51), (5.52), and (5.53), we +obtain (2.8). +Next we prove (ii). Due to (5.15), (5.18), and (5.20) and Theorem 2.16 (i), we have +u ∈ Cαµ−1/p([δ, T]; Hγ−2ν +p +) almost surely. To demonstrate (2.10), choose µ and ν satisfy +(2.9). Observe that Lemma 5.5 implies that +E +sup +δ≤s0 if +� +max +v′∈∆n u⊤Av′ +� +− +� +min +u′∈∆m(u′)⊤Av +� +≤ ǫ. +1Note that quantifying the end-to-end speedups obtained by these methods can be subtle due to I/O overheads, +different access models [Aar15], and classical de-quantization algorithms [Tan19, CGL+20, GLG22]. +1 + +We assume that the payoff matrix A and the error-tolerance are given as input to an algorithm, and +that, for simplicity, ∥A∥max ≤ 1, i.e. the largest entry of A has magnitude at most 1 (this is without +loss of generality by rescaling A ← ∥A∥−1 +max A and ǫ ← ∥A∥−1 +max ǫ). The main goal of this paper is +to design improved zero-sum game solvers, i.e. algorithms that compute ǫ-approximate NEs. +Zero-sum games are foundational to theoretical computer science, optimization, and economics. +The problem of approximately solving zero-sum games is a natural formulation of approximate linear +programming (LP) and correspondingly, this problem is a prominent testbed for new optimization +techniques. Over the past decades there have been numerous advances in the computational com- +plexity of solving zero-sum games under various assumptions on problem parameter (see Section 1.3 +for a survey). Recent advancements in interior point methods (IPMs) for linear programming, e.g. +[vdBLL+21] and references therein (discussed in more detail in Section 1.3), solve zero sum-games +in time �O(mn + min(m, n)2.5).2 Further the linear programming algorithm of [vdB20], shows that +zero-sum games can be solved deterministically in �O((m+n)ω) time where ω < 2.373 is the current +matrix multiplication constant [AW21], or �O((m + n)3) without fast matrix multiplication. In this +paper, we primarily focus on sublinear-time algorithms for approximating NEs. +A well-known algorithm by [GK95] achieves a runtime of �O((m + n) · ǫ−2), which is the state- +of-the-art sublinear runtime amongst classical algorithms, without further problem assumptions. +Recently it has been shown that quantum algorithms can yield strikingly runtime improvements for +solving zero-sum games and their generalizations [LCW19, vAG19, LWCW21]. In particular, in 2019 +Li, Chakrabati and Wu [LCW19] gave a quantum algorithm for zero sum games in time �O(√m + n· +ǫ−4), and simultaneously van Apeldoorn and Gilyen [vAG19] gave an algorithm running in time +�O(√m + n · ǫ−3). These algorithms yield a quadratic improvement in the dimension dependence of +the best classical algorithm, at the cost of a higher error dependence. 
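For concreteness, the ǫ-approximate equilibrium criterion above can be checked directly for any candidate pair of strategies; the snippet below is a plain classical check (NumPy is used purely for illustration and is not part of any algorithm in this paper).

```python
import numpy as np

def duality_gap(A, u, v):
    """Gap(u, v) = max_{v' in the simplex} u^T A v' - min_{u' in the simplex} (u')^T A v.
    Both inner linear optimizations over a simplex are attained at vertices, so the gap
    is a maximum over the entries of A^T u minus a minimum over the entries of A v."""
    return float(np.max(A.T @ u) - np.min(A @ v))

def is_approx_nash(A, u, v, eps):
    # (u, v) is an eps-approximate Nash equilibrium iff its duality gap is at most eps.
    return duality_gap(A, u, v) <= eps
```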
+The algorithms of [LCW19, vAG19, LWCW21] operate using a standard quantum oracle for A +(formally stated in Section 2), in which one can query the entries of A in superposition. We focus on +the algorithm of [vAG19] for the rest of this paper, as we focus on improving error dependence. The +[vAG19] algorithm generalizes the classical algorithm of Grigoriadis and Khachiyan [GK95], and +obtains a runtime improvement by speeding up a key dynamic Gibbs sampling subroutine required +by the [GK95] method. As we discuss in greater detail in Section 3, van Apeldoorn and Gilyen give +a quantum data structure to efficiently perform this sampling in time quadratically faster in the +dimension, which lies at the core of their algorithmic speedup. +Our result. +We give a new quantum algorithm for solving zero-sum games which improves upon +the runtime of the prior state-of-the-art quantum algorithm, due to [vAG19]. +Theorem 1 (informal, see Theorem 4). Let A ∈ Rm×n with ∥A∥max ≤ 1, and ǫ ∈ (0, 1). Given +a quantum oracle for A (defined in Section 2), there is an �O(√m + n · ǫ−2.5 + ǫ−3) time algorithm +which yields a classical output (u, v) ∈ ∆m × ∆n that is an ǫ-approximate NE with high probability. +Our new algorithm simultaneously improves the best known quantum [vAG19] and classical +[GK95] algorithms in the parameter regime where IPMs do not dominate sublinear algorithms. In +particular, it is faster than the classical �O((m+n)·ǫ−2) runtime of [GK95] whenever ǫ−1 = �O(m+n), +which includes the regime where [GK95] offers advantages over the �O((m + n)ω) runtime of the +[vdB20] IPM, as ω < 3. This is in contrast to the prior quantum rate of [vAG19], which does +not achieve an improvement upon [GK95] in the full parameter range where sublinear algorithms +2We use the �O notation to hide polylogarithmic dependences on problem parameters when convenient for exposi- +tion; see Section 2 for a more detailed statement of hidden parameters. In informal theorem statements, we use “with +high probability” to indicate a polylogarithmic dependence on the failure probability. +2 + +are currently preferable to IPMs. +For example, when m ≈ n and (up to logarithmic factors) +ǫ ∈ [n−c, n− 1 +2 ] where c = 1 +2(ω − 1), the rate of [GK95] is favorable to that of [vAG19] and state-of- +the-art IPMs [vdB20, CLS21].3 +1.2 +Dynamic Gibbs sampling +We obtain the improved error dependence in our zero-sum game solver by producing a new, faster +quantum data structure to perform the Gibbs sampling as used in the algorithm of [vAG19], which +may be of independent interest. Gibbs sampling is a fundamental algorithmic primitive — the basic +task is, given vector v ∈ Rn, sample from the probability distribution proportional to exp(v). Gibbs +sampling is used as a subroutine in many quantum and classical optimization algorithms, e.g. [BS17] +and follow-up works. In general, quantum algorithms can perform this task more efficiently using +amplitude estimation, which can boost the acceptance probability of rejection sampling schemes. +This strategy was implemented in [vAG19], which approximate the maximum entry vmax of v +using quantum maximum finding [DH96], uniformly sample i ∈ [n], and accept the sample with +probability exp(vi −vmax) ≤ 1 using quantum rejection sampling. We give a more detailed overview +of the [vAG19] Gibbs sampler and its complexity analysis in Section 3.2. 
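To make this sampling step concrete, the following is a minimal classical sketch of the max-based rejection sampling logic just described (our rendering, not code from [vAG19]); the quantum algorithm replaces the explicit maximum computation with quantum maximum finding and the sequential accept/reject loop with amplitude amplification, which is the source of its √(m + n) dependence.

```python
import math
import random

def gibbs_sample_max_rejection(v, rng=None):
    """Sample an index i with probability proportional to exp(v[i]):
    propose i uniformly, accept with probability exp(v[i] - max(v)) <= 1."""
    rng = rng or random.Random(0)
    n = len(v)
    v_max = max(v)  # the quantum algorithm estimates this via quantum maximum finding
    while True:
        i = rng.randrange(n)                       # uniform proposal over [n]
        if rng.random() < math.exp(v[i] - v_max):  # rejection step
            return i                               # accepted: i ~ exp(v[i]) / sum_j exp(v[j])
```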
+We give a data structure which quadratically improves the error dependence of the [vAG19] +Gibbs sampling subroutine runtime, from �O(√m + n· ǫ−1) per sample to an amortized �O(√m + n · +ǫ− 1 +2) per sample. A key fact which enables this improvement is that the Gibbs distributions one +samples from in the zero-sum game solver of [GK95] change slowly over time: the base vector v +receives bounded sparse updates in each iteration. By storing partial information about the Gibbs +distribution, namely an efficiently-computable overestimate to its entries which remains valid across +many consecutive iterations, we obtain an improved dynamic Gibbs sampler, which we also provide +a detailed overview of in Section 3.2. +We now define our notion of an approximate Gibbs sampler, and then state the dynamic sampling +problem we consider, which arises naturally in zero-sum game algorithms with sublinear runtimes. +Definition 1 (Approximate Gibbs oracle). For v ∈ Rn, its associated Gibbs distribution is pv ∈ ∆n +such that for all i ∈ [n], [pv]i ∝ exp(vi). We say Ogibbs +v +is a δ-approximate Gibbs oracle if it samples +from ˜p ∈ ∆n with ∥˜p − pv∥1 ≤ δ. +Problem 1 (Sampling maintenance). Let η > 0, δ ∈ (0, 1), and suppose we have a quantum oracle +for A ∈ Rm×n. Consider a sequence of T Update operations to a dynamic vector x ∈ Rm +≥0, each +of the form xi ← xi + η for some i ∈ [m]. In the sampling maintenance problem, in amortized +Tupdate time per Update we must maintain a δ-approximate Gibbs oracle, Osamp, for A⊤x which is +queryable in worst-case time Tsamp. +Our result. +We provide a quantum algorithm for solving Problem 1, which improves upon the +runtime implied by the corresponding component in the algorithm of [vAG19]. +Theorem 2 (informal, see Theorem 3). There is a quantum algorithm which solves Problem 1 with +high probability with max(Tsamp, Tupdate) = �O +�√n · Tη1.5� +and an initialization cost of �O +� +η3T 3� +. +Theorem 2 improves upon the solution to the sampling maintenance Problem 1 implied by +[vAG19] by a η− 1 +2 factor; in the setting of the [GK95] solver, where T = �O(ǫ−2) and η = Θ(ǫ), +this is an ǫ− 1 +2-factor improvement. +At a high level, our improvement is obtained by storing a +hint consisting of a vector which overestimates the true Gibbs distribution, and an approximate +3There is evidence that ω = 2 cannot be achieved with current techniques, e.g. [Alm21]. +3 + +Table 1: Algorithms for computing ǫ-approximate Nash equilibria of zero-sum games. +Hides polylogarithmic factors and assumes A ∈ Rm×n with ∥A∥max ≤ 1. +Method +Query model +Total runtime +interior point method [CLS21] +classical +max(m, n)ω +interior point method [vdBLL+21] +classical +mn + min(m, n)2.5 +extragradient [Nem04, Nes07] +classical +mn · ǫ−1 +stochastic mirror descent (SMD) [GK95] +classical +(m + n) · ǫ−2 +variance-reduced SMD [CJST19] +classical +mn + +� +mn(m + n) · ǫ−1 +[vAG19] +quantum +√ +m + n · ǫ−3 +Theorem 1 (our work) +quantum +√ +m + n · ǫ−2.5 + ǫ−3 +Table 2: Solutions to Problem 1, T = ǫ−2, η = ǫ. Hides polylogarithmic factors. +Method +Query model +Tsamp +Tupdate +explicit updates [GK95] +classical +1 +m + n +max-based rejection sampling [vAG19] +quantum +√ +m + n · ǫ−1 +√ +m + n · ǫ−1 +Theorem 2 (our work) +quantum +√ +m + n · ǫ− 1 +2 +√ +m + n · ǫ− 1 +2 +normalization factor, which are infrequently updated. Our maintained hint satisfies the desirable +properties that: (i) it remains valid for a batch of consecutive iterations, and (ii) the degree of +overestimation is bounded. 
The former property ensures a fast amortized update time, and the +latter ensures a fast sample time by lower bounding the acceptance probability of our quantum +rejection sampler. Our high-level strategy for maintaining improved hints is to repeatedly call our +sampling access to accurately estimate large entries of the Gibbs distribution, and to exploit stability +of the distribution under the setting of Problem 1. We discuss our dynamic Gibbs sampler in more +detail and compare it with previous methods for solving Problem 1 in Section 3.2. +The initialization cost of Theorem 2 is due to the current state-of-the-art in numerically stable +implementations of the quantum singular value transformation (SVT) framework of [GSLW19]. +This cost is also the cause of the additive �O(ǫ−3) term in Theorem 1. +We discuss this cost in +Appendix D; improvements to numerically stable implementations of [GSLW19] would be reflected +in the runtimes of Theorems 1 and 2. +1.3 +Related work +Quantum optimization and machine learning. +There are a wide array of quantum algorithms +for optimization and machine learning which make use of fundamental algorithmic primitives such +as amplitude amplification [BHMT02], the HHL algorithm [HHL09], and the quantum singular +value transformation [GSLW19]. For example, a number of works gave HHL-based algorithms for +a variety of machine learning tasks such as PCA [LMR14], SVMs [RML14], and recommendation +systems [KP16]. For more details see the survey article of [BWP+17]. +Most relevant to our current work are quantum algorithms for optimization problems. +For +example, Brandao and Svore [BS17] gave a quantum algorithm for SDP solving based on the Arora- +4 + +Kale algorithm [AK07], which was later improved by [VAGGdW20b]. There have also been quantum +IPM-based methods for LPs and SDPs [KP20]. +Additionally a series of works have considered +quantum algorithms for general convex optimization [CCLW20, vAGGdW20a], which make use of +Jordan’s algorithm for fast gradient estimation [Jor05, GAW19]. +In the area of zero-sum games, in addition to the works previously mentioned [vAG19, LCW19] +on ℓ1-ℓ1 games (where both players are ℓ1-constrained), there have been several works considering +different variants of zero-sum games. For example Li, Chakrabati and Wu [LCW19] gave quan- +tum algorithms for ℓ2-ℓ1 games with quadratic improvement on the dimension. Later Li, Wang, +Chakrabati and Wu [LWCW21] extended this algorithm to more general ℓq-ℓ1 games with q ∈ (1, 2]. +Zero-sum games. +Zero-sum games are a canonical modeling tool in optimization, economics and +machine learning [Neu28]. The classic extragradient (mirror prox) method [Nem04, Nes07] computes +an ǫ-approximate NE in �O(mn · ǫ−1) time; as discussed previously, the stochastic mirror descent +method of [GK95] obtains the same accuracy in time �O((m + n) · ǫ−2). An intermediate runtime +was recently obtained by [CJST19] using variance reduction, described in Table 1. +Improved runtimes are available under more fine-grained characterizations of the matrix A, such +as sparsity (e.g. number of nonzero entries per row or column) or numerical sparsity (e.g. rows and +columns with bounded ℓ1-to-ℓ2 norm ratios) [CJST20]. Notably, the [GK95] algorithm also offers +runtime improvements under a sparsity assumption, as does the algorithm of [vAG19] in certain +sparsity-to-accuracy ratio regimes. In this paper, we focus on NE algorithms in the general setting +(without further sparsity or numerical sparsity assumptions). 
In parallel, a long line of research improving IPMs for solving linear programming [Kar84, Ren88, LS14, LS19, vdBLSS20, JSWZ21] has led to a number of different zero-sum game solvers with polylogarithmic runtime dependencies on the problem accuracy ǫ. The current state-of-the-art variants of IPMs are [CLS21] and [vdBLL+21], which achieve runtimes of Õ(max(m, n)^ω) and Õ(mn + min(m, n)^{2.5}) respectively. We refer readers to Table 1 for detailed comparisons. Finally, for strongly polynomial runtimes (i.e. with no dependence on ǫ), which are outside the scope of this paper, we refer readers to [DNV20] and references therein.

1.4 Future work

Theorem 1's ǫ dependence is within an ǫ^{-1/2} factor of matching classical counterparts. To the best of our knowledge, removing this ǫ^{-1/2} overhead would represent the first quantum algorithm for a natural optimization problem which improves upon classical counterparts across all parameters.

Both our work and [vAG19] solve Problem 1 by leveraging a powerful polynomial approximation-based technique developed in [GSLW19], known as the quantum singular value transform (QSVT). In both cases, QSVT is used with a polynomial of degree Õ(ǫ^{-1}). We note that in closely-related classical settings (discussed in [SV14]), Chebyshev polynomial-based approximations yield a quadratically smaller degree. However, a boundedness requirement (due to the spectra of quantum gates) prevents straightforwardly applying these constructions within QSVT. Sidestepping this barrier is a natural avenue towards improving our work, which we leave as an open problem.

More generally, establishing optimal oracle query complexities of dynamic Gibbs sampling (e.g. Problem 1) and solving zero-sum games are key problems left open by our work. These questions are potentially more approachable than establishing tight time complexity characterizations. For example, could max(Tsamp, Tupdate) be improved to Õ(√n) in the context of Theorem 1, or can we rule out such an improvement in the query model?

1.5 Organization

In Section 2 we state the notation used throughout the paper, as well as the (classical and quantum) computational models we assume. In Section 3, we give a brief technical overview of the core components of our algorithm used to prove Theorem 1: the stochastic gradient method our method is built on, and an efficient quantum implementation of a key subroutine using a new dynamic Gibbs sampler. Finally, in Section 4 we give our new quantum sampler and prove Theorem 2.

We aim to give a self-contained, but simplified, description of our algorithm in Section 3 to improve the readability of the paper for readers with an optimization background unfamiliar with quantum computing, and vice versa. In particular, we abstract away the core optimization machinery (stochastic mirror descent) and quantum machinery (quantum SVT) developed in prior work into the statements of Propositions 1 and 2, and focus on how we use these statements black-box to build a faster algorithm. The proofs of these statements can be found in Appendices A and B.

2 Preliminaries

General notation. Õ hides logarithmic factors in problem dimensions (denoted m and n), target accuracies (denoted ǫ), and failure probabilities (denoted α). When discussing runtimes for Problem 1, we additionally use Õ to hide logarithmic factors in the parameters η, T. For all i ∈ [n], we let ei ∈ Rn denote the ith standard basis vector when n is clear.
∥·∥p denotes the ℓp norm of +a vector. For A ∈ Rm×n, its ith row and jth column are respectively Ai:, A:j. For v ∈ Rn, diag (v) +is the diagonal n × n matrix with v as the diagonal. Conjugate transposes of A are denoted A∗; +when the matrix is real we use A⊤. The all-ones and all-zeros vectors of dimension n are 1n and +0n. Finally, throughout a := ⌈log2 m⌉ and b := ⌈log2 n⌉, so [m] ⊆ [2a] and [n] ⊆ [2b]. +Computation models. +We assume entries of A are w-bit reals for w = O(log(mn)), and work in +the word RAM model where w-bit arithmetic operations take O(1) time; for simplicity, we assume +mathematical operations such as trigonometric functions and radicals can also be implemented ex- +actly for w-bit words in O(1) time. Throughout, “quantum states” mean unit vectors, and “quantum +gates” or “oracles” O mean unitary matrices. We follow standard notation and identify a standard +basis vector ei for i ∈ [n] with |i⟩, an a-qubit state, in which i is represented in binary (i.e. more for- +mally, |i⟩ = |bin(i)⟩, and bin is omitted for brevity). We consider the standard model of quantum +access to oracles, in which the oracle O, which is defined by its operation on |s⟩ for all {0, 1}∗- +valued s (where length is clear from context), can be queried in superposition. If O is queried on +|v⟩ := � +s αs|s⟩, the result is O|v⟩ = � +s αi(O|s⟩). We use |g⟩, |g′⟩, etc. (when clear from context) +to denote arbitrary sub-unit vectors, which represent garbage states (unused in computations). The +tensor product of states |u⟩ and |v⟩ on a and b qubits is denoted |u⟩|v⟩, an (a + b)-qubit state. The +runtime of a quantum circuit is its maximum depth (in arithmetic gates on w-bit words). +Access model. +Throughout the paper, we assume a standard quantum oracle for accessing A +(recall ∥A∥max ≤ 1). In particular, by a quantum oracle for A we mean an oracle OA which, when +queried with |i⟩|j⟩|s⟩ for i ∈ [m], j ∈ [n], s ∈ {0, 1}w, reversibly writes Aij (in binary) to the third +register in O(1) time, i.e. OA|i⟩|j⟩|s⟩ = |i⟩|j⟩|s ⊕ Aij⟩ where ⊕ is bitwise mod-2 addition. +Given a quantum oracle for A, with two queries, by standard constructions one can construct +an oracle which places the value in the amplitude of the state rather than the register itself. More +6 + +formally, one can construct4 an O′ +A, which operates as: +O′ +A|0⟩|i⟩|j⟩ = +� +Aij|0⟩|i⟩|j⟩ + +� +1 − |Aij||1⟩|g⟩, for (i, j) ∈ [m] × [n]. +It is standard in the literature to (using ancilla qubits to store the output register where Aij is +written) construct such an O′ +A from OA under our classical model of computation, see e.g. [GR02]. +For simplicity, we omit discussion of ancilla qubits in the remainder of the paper and assume direct +access to O′ +A. We also note that there is ambiguity in the implementation of O′ +A in that the square +root is not unique, and that we have control over the signing used in this implementation. We will +use this flexibility crucially later in the paper, specifically Corollary 6. +3 +Overview of approach +In this section, we give an overview of the approach we take to prove our main results: an improved +quantum runtime for solving zero-sum games (Theorem 4) and an improved quantum data structures +for dynamic Gibbs sampling (Theorem 3). We organize this section as follows. +In Section 3.1, we state Algorithm 1, the optimization method framework we use to solve zero- +sum games. This framework is a generalization of the classical algorithm of [GK95]. 
We state its +guarantees in Proposition 1 and defer the proof to Appendix A. Algorithm 1 assumes access to +an approximate Gibbs oracle (Definition 1) for sampling from dynamic distributions as stated in +Problem 1. The bulk of our work is devoted to obtaining an efficient quantum implementation of +such an oracle (Theorem 3) and using this result we prove Theorem 4 at the end of Section 3.1. +In Section 3.2, we overview the main technical innovation of this paper, an improved solution to +Problem 1. Whereas prior work by [vAG19] solves Problem 1 at an amortized ≈ √m + n · ǫ−1 cost +per iteration, we show how to solve the problem at an amortized ≈ √m + n · ǫ− 1 +2 cost. We remark +that the only quantum components of our algorithm (quantum SVT and amplitude amplification) +are abstracted away by Proposition 2, which is proven in Appendix B. +3.1 +Solving matrix games with a Gibbs sampling oracle +Our proof of Theorem 4 uses an efficient implementation of the algorithmic framework stated in +Algorithm 1, based on stochastic mirror descent. In specifying Algorithm 1, we recall our earlier +Definition 1, which captures the approximate sampling access we require for Algorithm 1’s execution. +Algorithm 1: MatrixGameSolver(δ, η, T) +1 Input: A ∈ Rm×n, desired accuracy ǫ ∈ (0, 1), δ-approximate Gibbs oracles for the +(dynamic) vectors −A⊤xt and Ayt +2 Parameters: Gibbs sampler parameter δ ∈ (0, 1), step size η > 0, iteration count T +3 Initialize ˆu ← 0m, ˆv ← 0n, x0 ← 0m, and y0 ← 0n +4 for t = 0 to T − 1 do +5 +Independently sample jt, j′ +t ∈ [n] using Ogibbs +−A⊤xt and it, i′ +t ∈ [m] using Ogibbs +Ayt +6 +Update yt+1 ← yt + ηejt and xt+1 ← xt + ηeit +// Update iterates. +7 +Update ˆu ← ˆu + 1 +T ei′ +t and ˆv ← ˆv + 1 +T ej′ +t +// Update output. +8 return (ˆu, ˆv) +4This follows e.g. by calling the oracle to obtain the value of Aij in binary (interpreted as a signed number +between 0 and 1), adding an ancilla qubit, performing arithmetric to compute the rotation angle needed on that +ancilla, applying a tower of controlled rotation gates to an ancilla qubit using that rotation angle express in binary, +then calling the standard oracle a second time to uncompute the binary value of Aij. See e.g. [GR02] for details. +7 + +The main skeleton of Algorithm 1 (Lines 5-6) using exact oracles is identical to the method of +[GK95]. However, our framework builds upon [GK95] in the following three ways. +1. We tolerate total variation error in the sampling procedure via δ-approximate Gibbs oracles. +2. We provide a high-probability guarantee on the duality gap using martingale arguments. +3. We subsample the output to obtain a sparse solution yielding a comparable duality gap. +We remark that several of these improvements have appeared previously, either explicitly or +implicitly, in the stochastic gradient method literature. For example, an approximation-tolerant +stochastic gradient method was given in [CJST20], and our proofs of the high-probability guarantees +are based on arguments in [AL17, CDST19]. For completeness we give a self-contained proof of the +following guarantee on Algorithm 1 in Appendix A. +Proposition 1. Let A ∈ Rm×n satisfy ∥A∥max ≤ 1 and ǫ, α ∈ (0, 1). Let δ ≤ +ǫ +20, η = +ǫ +60, and +T = Θ(ǫ−2 log mn +α ) for an appropriate constant. With probability ≥ 1 − α, Algorithm 1 outputs an +ǫ-approximate NE for A. +Given Proposition 1 to obtain our faster zero-sum game solvers, we simply need to efficiently im- +plement the Gibbs sampling in Line 5. 
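As a purely classical point of reference for Lines 5-7, the following sketch instantiates Algorithm 1 with exact Gibbs sampling in place of the δ-approximate oracles; the step size and iteration count follow Proposition 1 only up to the unspecified constant factors, and no quantum component is modeled.

```python
import numpy as np

def exact_gibbs_sample(v, rng):
    # Sample an index with probability proportional to exp(v); subtract max(v) for stability.
    w = np.exp(v - np.max(v))
    return rng.choice(len(v), p=w / np.sum(w))

def matrix_game_solver(A, eps, alpha=0.01, seed=0):
    """Classical stand-in for MatrixGameSolver with exact Gibbs oracles (illustrative constants)."""
    rng = np.random.default_rng(seed)
    m, n = A.shape
    eta = eps / 60.0
    T = int(np.ceil(np.log(m * n / alpha) / eps ** 2))  # Theta(eps^-2 log(mn / alpha))
    x, y = np.zeros(m), np.zeros(n)
    u_hat, v_hat = np.zeros(m), np.zeros(n)
    for _ in range(T):
        # Line 5: sample j_t, j'_t from Gibbs(-A^T x_t) and i_t, i'_t from Gibbs(A y_t).
        j, j_out = exact_gibbs_sample(-A.T @ x, rng), exact_gibbs_sample(-A.T @ x, rng)
        i, i_out = exact_gibbs_sample(A @ y, rng), exact_gibbs_sample(A @ y, rng)
        # Line 6: update the iterates.
        y[j] += eta
        x[i] += eta
        # Line 7: accumulate the sparse averaged output.
        u_hat[i_out] += 1.0 / T
        v_hat[j_out] += 1.0 / T
    return u_hat, v_hat
```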
As introduced in Section 1, Problem 1, describes a dynamic +approximate Gibbs oracle sampling problem sufficient for this task. Indeed, solving two appropriate +parameterizations of Problem 1 provides the oracles needed by Algorithm 1. By combining Propo- +sition 1 with the following Theorem 3 (our solution to Problem 1, discussed in greater detail in +Section 3.2), we prove our main result Theorem 4. +Theorem 3. Let α ∈ (0, 1) and δ ≤ η. Given a quantum oracle for A ∈ Rm×n (defined in Section 2) +with ∥A∥max ≤ 1, we can solve Problem 1 with probability ≥ 1 − α with +max(Tsamp, Tupdate) = O +� +1 + √n · Tη log4 �mn +δ +� +· +�� +η log +�nηT +α +� ++ η log +�nηT +α +��� +, +and an additive initialization cost of +O +� +η3T 3 log4 +�nηT +δ +� ++ log7 +�nηT +δ +�� +. +Theorem 4. Let A ∈ Rm×n satisfy ∥A∥max ≤ 1, and let ǫ, α ∈ (0, 1). Given a quantum oracle for A +(defined in Section 2), there is a quantum algorithm which yields a classical output (u, v) ∈ ∆m×∆n +that is an ǫ-approximate NE for A with probability ≥ 1 − α in time +O +�√m + n +ǫ2.5 +log4 �mn +ǫ +� +log2.5 �mn +αǫ +� ++ +√m + n +ǫ2 +log4 �mn +ǫ +� +log3 �mn +αǫ +� ++ 1 +ǫ3 log7 �mn +ǫ +�� +. +Proof. We apply two instances of Theorem 3 to implement the δ-approximate Gibbs oracle for +the dynamic vectors −A⊤xt and Ayt, to implement each iteration of Algorithm 1 in amortized +O(1 + Tsamp + Tupdate) time. Using the settings of parameters T, η in Proposition 1 and setting +δ = Θ(ǫ), which suffices for Algorithm 1 and Theorem 3, we have +max(Tsamp, Tupdate) = O +�√m + n +ǫ +log4 �mn +ǫ +� +log +�mn +αǫ +� � +ǫ log +�mn +αǫ +� ++ +� +ǫ log +�mn +αǫ +��� +. +The conclusion follows since, by observation, Algorithm 1 costs O(T · (1 + Tsamp + Tupdate)). As +remarked in the introduction, the additive term in the runtime comes from the cost of stably +implementing a quantum circuit required in the use of Theorem 3 representing a polynomial trans- +formation in finite precision, which we discuss in greater detail in Appendix D. +8 + +3.2 +Dynamic sampling maintenance via dynamic hint maintenance +In this section, we overview our proof of Theorem 3, which proceeds in two steps. +1. We reduce sampling maintenance (Problem 1) to a problem which we call hint maintenance. +This latter problem is a specialization of the sampling maintenance problem where suitable +advice, which we call the hint throughout, is provided. +2. We show how to solve the hint maintenance problem required by Proposition 2 in Theorem 3, +by recursively calling Proposition 2 in phases, allowing us to maintain hints of suitable quality. +Reducing sampling maintenance to hint maintenance. +First, we introduce the following +data structure for maintaining the x variable in Problem 1, which was used crucially in [vAG19] for +dynamic Gibbs sampling. This data structure allows efficient queries to subsets of the coordinates +of x and we use it in our Gibbs sampler as well. +Lemma 1 (Sampler tree). Let η ∈ R≥0 and m ∈ N. There is a classical data structure, SamplerTree, +supporting a tree on O(m) nodes such that [m] corresponds to leaves, with the following operations. +• Init(m, ηfixed): initialize x ← 0m and η ← ηfixed +• Update(i): xi ← xi + η +• SubtreeSum(v): return the sum of all xi, where i is in the subtree of v +The total runtime of T calls to Update is O(T log m), and calls to SubtreeSum cost O(1). +An implementation of SamplerTree based on propagating subtree sums upon updates is standard +classical data structure, and we omit further description for brevity. 
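For concreteness, here is one minimal classical realization of such a SamplerTree (our illustrative indexing scheme; Lemma 1 does not prescribe an implementation): a complete binary tree stores subtree sums, so each Update touches the O(log m) nodes on a leaf-to-root path and SubtreeSum is a single array read.

```python
class SamplerTree:
    """Sketch of Lemma 1: leaves 0..m-1 hold x, internal nodes hold subtree sums."""

    def __init__(self, m, eta_fixed):          # Init(m, eta_fixed): x <- 0_m, eta <- eta_fixed
        self.m, self.eta = m, eta_fixed
        self.size = 1
        while self.size < m:                   # smallest power of two covering the m leaves
            self.size *= 2
        self.sums = [0.0] * (2 * self.size)    # node v has children 2v and 2v + 1; the root is node 1

    def update(self, i):
        """Update(i): x_i <- x_i + eta, refreshing the O(log m) sums on the path to the root."""
        v = self.size + i                      # leaf holding coordinate i
        while v >= 1:
            self.sums[v] += self.eta
            v //= 2

    def subtree_sum(self, v):
        """SubtreeSum(v): sum of x_i over all leaves i below node v, in O(1) time."""
        return self.sums[v]
```

In particular, subtree_sum(1) returns ∥x∥1.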
Next, we state our first building +block towards solving Problem 1, a result which can be thought of as quantum sampling with a hint. +We defer its proof to Appendix B, as it is primarily based on generalizing dynamic block-encoding +strategies with bounded-degree polynomial approximations, as pioneered by [GSLW19, vAG19]. +Proposition 2. Let x ∈ Rm +≥0 correspond to an instance of SamplerTree, and β ≥ ∥x∥1. Let p be +the Gibbs distribution associated with A⊤x, let Z := � +j∈[n] exp([A⊤x]j) and �Z ∈ [Z, CZ] for some +C ≥ 1. Finally, let q ∈ Rn have entries classically queriable in O(1) time, satisfy q ≥ p entrywise, +qj ∈ [ δ +n, 1] for all j ∈ [n], and ∥q∥1 = ρ. Suppose �Z, C, ρ, and β are explicitly known. Given +a quantum oracle for A ∈ Rm×n (defined in Section 2) with ∥A∥max ≤ 1, we can implement a +δ-approximate Gibbs oracle which has query cost O(√ρC · β log4 � Cmn +δ +� +). The total additional cost +incurred if x undergoes T Update calls which preserve the invariants on �Z, C, ρ, β is O(T log m). +Proposition 2 makes use of an overestimating hint vector q and approximate normalization +constant �Z, which we collectively call the hint. The acceptance probability of our rejection sampling +is governed by two primary parameters: ρ = ∥q∥1, which reflects the degree of overestimation +(and can be thought of as a hint quality), and C ≥ 1, which reflects our inability to accept with +probability pj +qj when p is implicit (which can be thought of as a normalization quality). In particular, +the rejection sampling scheme used in Proposition 2 will instead accept with probability +pj +Cqj .5 +Here we elaborate briefly on the implementation of Proposition 2 (for more details, see Ap- +pendix 4). We follow notation of Proposition 2, and also let w := A⊤x such that the unnormalized +5Exactly computing Z may require time Ω(n) in standard implementations, an obstacle to runtimes ∝ √n. +9 + +Gibbs distribution is exp(w), and p = exp(w) +Z +. Proposition 2 is a rejection sampler which first loads +the hint q into superposition, and then applies a filter. Overall, our scheme has the form +sample j ∼ q +ρ, then accept with probability exp(wj) +CZ · qj += pj +Cqj +, +(1) +which results in an accepted sample with probability ≈ +1 +ρC , and hence requires ≈ √ρC trials to suc- +ceed after applying quantum amplitude amplification, a generalization of Grover search [BHMT02].6 +The latter filtering step is implemented using appropriate block-encoding technology. +The above discussion suggests that the hint and normalization qualities, parameterized by ρ +and C, are crucial in controlling the acceptance probability of our scheme. More concretely, in +our applications of Proposition 2, β = ηT = �O(1 +ǫ ), which is the bound on the ℓ1 norm of the +xt and yt iterates in Algorithm 1 under the parameter settings of Proposition 1. +Overall, the +cost of implementing an approximate Gibbs oracle is then (up to logarithmic factors) √ρC · 1 +ǫ. +Proposition 2 hence reduces Problem 1 to the problem of maintaining the hint consisting of a vector +q and a normalization estimate �Z. We mention that Proposition 2 is a strict generalization of a +corresponding building block in [vAG19], which only used q set to the all-ones vector. +Approaches for Problem 1. +We now overview our improved solution to Problem 1 via efficient +use of Proposition 2. To motivate our solution, we outline three solutions to Problem 1 offering +different tradeoffs in the overall quality ρC. 
The first only uses classical information and does not +use Proposition 2 at all, the second uses Proposition 2 but maintains no history across iterates, and +the third (building upon the first two) is our approach. +Solution 1: [GK95]. A standard way to solve Problem 1 is to explicitly update w = A⊤x and +exp(w), and exactly maintain the normalizing constant Z. This allows us to sample from p in �O(1) +time. Since w changes by one row of A under a 1-sparse Update operation to x, this is implementable +in O(n) time per iteration. We can view this as an instance of the scheme (1) with q = p, C = 1, +and ρ = 1. It yields the (unbalanced) tradeoff for Problem 1 of Tsamp = �O(1) and Tupdate = O(n). +Solution 2: [vAG19]. A recent work [vAG19] introduced a quantum implementation of the scheme +(1) with an improved tradeoff. The [vAG19] scheme first uniformly samples, which in the language +of (1) means q = 1n and ρ = n. It then applies quantum maximum finding [DH96] to obtain an +approximate maximum entry of w, which they show takes time �O(β · √n); for the sake of simplicity +here, we assume this exactly yields wmax := maxj∈[n] wj. Finally, the acceptance probability +pj +Cqj is +set to exp(wj − wmax). For q = 1n, this translates to +pj · exp(wmax − wj) = exp(wmax) +Z +≤ 1, +implying C = 1 suffices. +We note this bound on C can be tight when w is very non-uniform. +Overall, the [vAG19] scheme’s update time requires maximum finding, and its sampling time (via +Proposition 2) requires time �O(β · √ρC) = �O(β · √n). +For β = �O(1 +ǫ) as in Algorithm 1, this +yields the balanced tradeoff max(Tsamp, Tupdate) = �O +�√n · ǫ−1� +. As discussed earlier, our key in- +sight is to improve upon this specific choice of hint in [vAG19], for their implicit use of Proposition 2. +Solution 3: this work. We design better hints for Proposition 2 by executing our algorithm in phases +corresponding to batches of ≈ 1 +η iterations. At the start of each phase, we use the Gibbs access +6The β in Proposition 2 comes from loading exp(wj) into a quantum oracle via polynomials of degree ≈ β. +10 + +afforded by Proposition 2 to produce a suitable hint for efficiently implementing the next phase. Our +execution of this strategy, parameterized by an integer k ∈ [n], relies on the following observations. +1. During ⌈ 1 +η⌉ iterations t ∈ {τ + s}s∈[⌈ 1 +η ⌉] (where τ starts the phase), the dynamic Gibbs +distribution pt (where t is the iteration index) changes by O(1) multiplicatively, since w +entrywise changes by O(1) additively. Thus, the quality of a hint vector deteriorates by at +most a constant in the phase, so it suffices to give a good hint qτ ≥ pτ at the phase start. +2. By using access to Proposition 2 at the end of the previous phase, we can efficiently estimate +large entries of pτ. +More precisely, we sample �O(k) times from pτ, and let the empirical +distribution of these samples be ˜q. Chernoff bounds show that any large entry [pτ]j = Ω( 1 +k) +will be accurately reflected in the empirical sample. Hence, we set the hint to +qj = +� +˜qj · O(1) +˜qj = Ω( 1 +k) +1 +k · O(1) +˜qj = O( 1 +k) , +for appropriate constants. This yields an improved hint quality of ρ ≈ n +k , since large entries +of the hint sum to at most O(1) (as ˜qj ≈ pj), and small entries sum to O(n +k ). +3. We show a similar strategy of using empirical concentration, combined with a testing variant +of Proposition 2, accurately estimates the normalizing factor Z, yielding C = O(1). 
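Putting these observations together, a purely classical sketch of the hint construction and of the resulting rejection sampler (i.e., scheme (1) instantiated with this hint) is given below; the specific constants mirror Lemma 3 in Section 4, the quantum algorithm accesses exp(wj) only through block-encodings, and amplitude amplification replaces the accept/reject loop so that roughly √(ρC) trials suffice rather than the ≈ ρC expected here.

```python
import math
import random

def build_hint(gibbs_oracle, n, k, num_samples, c=18):
    """Observation 2: form an entrywise overestimate q of the current Gibbs distribution
    from num_samples = Theta(k log(n * eta * T / alpha)) draws of the (approximate) oracle.
    Entries with empirical mass at least 1/(2k) become upscaled empirical frequencies;
    all remaining entries default to c/k. (The constant c = 18 and the 1/(2k) threshold
    mirror Lemma 3 in Section 4.)"""
    counts = [0] * n
    for _ in range(num_samples):
        counts[gibbs_oracle()] += 1
    q = []
    for j in range(n):
        freq = counts[j] / num_samples
        q.append(c * freq if freq >= 1.0 / (2 * k) else c / k)
    return q  # ||q||_1 = O(n / k) with high probability

def sample_with_hint(w, q, Z_hat, C, rng=None):
    """Scheme (1): propose j with probability q_j / ||q||_1, then accept with probability
    exp(w_j) / (C * Z_hat * q_j) <= 1, which is valid whenever q >= p entrywise and
    Z_hat lies in [Z, C*Z]; accepted indices are distributed exactly according to p."""
    rng = rng or random.Random(0)
    rho, prefix, running = sum(q), [], 0.0
    for qj in q:
        running += qj
        prefix.append(running)
    while True:
        r = rng.random() * rho
        j = next(idx for idx, s in enumerate(prefix) if s >= r)  # j proposed w.p. q_j / rho
        if rng.random() < math.exp(w[j]) / (C * Z_hat * q[j]):
            return j
```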
+This strategy yields Tsamp = �O(β · +� +n/k) and Tupdate = �O(Tsamp · kη) (since we amortize Tupdate +over ≈ 1 +η iterations). For the parameter settings of Algorithm 1, optimizing k yields +max(Tsamp, Tupdate) = �O +�√n · ǫ− 1 +2 +� +. +We prove Theorem 3, our improved solution to Problem 1, in Section 4. Ignoring logarithmic fac- +tors and assuming η ≪ 1 (as in our setting), Theorem 3 shows we can maintain max(Tsamp, Tupdate) = +�O(√n · Tη1.5). For the parameter settings T = �O(ǫ−2), η = Θ(ǫ), as stated in Proposition 1, this +indeed equates to max(Tsamp, Tupdate) = �O(√n · ǫ− 1 +2). +4 +Gibbs sampling oracle implementation +In this section, we prove Theorem 3, which gives our solution to Problem 1. To do so, we follow the +outline given in Section 3.2, wherein we solve Problem 1 in batches of ⌈ 1 +η⌉ iterations, each of which +we call a “phase.” In Sections 4.1 and 4.2, we only discuss a single phase of Problem 1, consisting +of the iterations τ + s for s ∈ [⌈ 1 +η⌉] and some initial iteration τ, assuming certain invariants (stated +below) hold at the start of the phase. We give a complete solution to Problem 1 in Section 4.3. +Invariant 1 (Approximate normalization access). We explicitly have �Zprev with �Zprev ∈ [Zτ, CZτ] +for some C = O(1). +Invariant 2 (Initial sampling maintenance). We have Oτ solving Problem 1 in iteration τ. +The remainder of this section is then organized as follows. +• Section 4.1: We show that assuming Invariants 1 and 2 hold at the start of a phase, we can +perform preprocessing used to construct our hint, consisting of the estimated normalization +�Z and vector q, in an application of Proposition 2. This gives the cost of Tsamp in Problem 1. +11 + +• Section 4.2: We show that at the conclusion of each phase we can maintain Invariants 1 and 2 +for use in the next phase. This gives the cost of Tupdate in Problem 1. +• Section 4.3: We recursively call the subroutine of Sections 4.1 and 4.2 (which solves Problem 1 +for all the iterations τ + s where s ∈ [⌈ 1 +η⌉] for some τ) ≈ ηT times to prove Theorem 3. +4.1 +Preprocessing and approximate Gibbs oracle implementation +In this section, we show how to construct the “hint” q which will be used throughout a phase +(starting in iteration τ) given access to Oτ, and bound ρ = ∥q∥1 which quantifies the quality of our +hint, under the assumption that Invariants 1 and 2 hold in the phase. We first show a multiplicative +stability property of the relevant Gibbs distributions in a phase. +Lemma 2. For all s ∈ [⌈ 1 +η⌉], we have +Zτ+s ∈ +�1 +3Zτ, 3Zτ +� +, and pτ+s ∈ +�1 +9pτ, 9pτ +� +entrywise. +Proof. Let νt := exp(A⊤xt) for all t, such that pt = νt +Zt . We have that for any j ∈ [n], +[ντ+s]j +[ντ]j += exp +�� +A⊤ (xτ+s − xτ) +� +j +� +∈ [exp (− ∥A∥max ∥xτ+s − xτ∥1) , exp (∥A∥max ∥xτ+s − xτ∥1)] +∈ [exp (−ηs) , exp (ηs)] ∈ +�1 +3, 3 +� +. +Similarly, Zτ+s ∈ [1 +3Zτ, 3Zτ], and combining yields the conclusion. +Next, our computation of the overestimating vector q is parameterized by an integer k ∈ [n] +which will be fixed throughout this section and Section 4.2. We will simply set q to be an upscaled +variant of an empirical distribution of roughly k draws from Oτ. +Lemma 3. Let k ∈ [n], α ∈ (0, 1), and suppose δ ≤ +1 +16k. Draw N = Θ(k log nηT +α ) samples from +Oτ for an appropriately large constant, and let ˜q ∈ ∆n be the empirical distribution over these N +samples. Define B := {i ∈ [n] | ˜qi ≥ +1 +2k}. 
Then for +qj = +� +18˜qj +j ∈ B +18 +k +j ̸∈ B , +with probability ≥ 1 − +α +2⌈ηT⌉, ∥q∥1 = O(n +k ) and q ≥ pτ+s entrywise, for all s ≤ 1 +η. +Proof. The first conclusion ∥q∥1 = O(n +k ) is immediate from the definition of q, since ∥q∥1 ≤ 18 ∥˜q∥1+ +18n +k . In light of Lemma 2 (which holds deterministically), to show the second conclusion, it suffices +to show that with the desired success probability, we have both +2˜qj ≥ [pτ]j for all j ∈ B +(2) +and +2 +k ≥ [pτ]j for all j ̸∈ B. +(3) +Denote α′ := +α +2⌈ηT⌉ for notational convenience, and let ˜p denote the distribution of samples from Oτ, +and recall that ∥˜p − pτ∥1 ≤ +1 +16k. Because we are taking Θ(k log n +α′ ) samples from ˜p, we have by a +standard Chernoff bound that with probability at least 1 − α′ (union bounding over all coordinates +j ∈ [n]), both of the following hold. +12 + +1. For all j ∈ [n] such that ˜pj ≥ +1 +4k, ˜qj ≥ 2˜pj +3 . +2. For all j ∈ [n] such that ˜pj ≤ +1 +4k, ˜qj ≤ +1 +2k. +We condition on these events for the remainder of the proof; we now show (2), (3) in turn. +Proof of (2). To see (2), the second event above implies that if ˜pj ≤ +1 +4k, then j ̸∈ B. Hence, for +all j ∈ B, we have ˜qj ≥ 2˜pj +3 ≥ [pτ]j +2 +since ∥˜p − pτ∥∞ ≤ +1 +16k ≤ 1 +4 ˜pj for all j ∈ B. +Proof of (3). To see (3), suppose for contradiction that j ̸∈ B and [pτ]j > 2 +k. This implies that +˜pj > 1 +k, and hence by the first event above, ˜qj ≥ +1 +2k, contradicting j ̸∈ B. +Corollary 1. Assume that Invariants 1, 2 hold for the phase consisting of iterations τ +s, s ∈ [⌈ 1 +η⌉]. +We can solve Problem 1 for the phase with probability ≥ 1 − +α +2⌈ηT⌉, and +Tsamp := O +��n +k · Tη log4 �mn +δ +�� +. +Proof. We will run the algorithm described in the proof of Lemma 3, and condition on it succeeding, +giving the failure probability. It then suffices to apply Proposition 2 with q defined in Lemma 3. For +this q, we parameterize Proposition 2 with C = O(1) (see Invariant 1), ρ = O(n +k ) (see Lemma 3), +and β = Tη. It is clear the lower bound on entries of q in Proposition 2 holds. +4.2 +Maintaining invariants +We now show how to maintain Invariant 1 at iteration τ ′ := τ + ⌈ 1 +η⌉, for use in the next phase, and +bound the cost of doing so. We note that Invariant 2 follows immediately from our construction in +Corollary 1. First, by combining Lemma 2 with Invariant 1, +Zτ ′ ∈ +� �Zprev +3C , 3 �Zprev +� +. +(4) +This suggests that we may use 3 �Zprev = �Z for the next phase; however, this would lead to an +exponential blowup in the multiplicative range C. To sidestep this, we develop a tester for a hidden +parameter governing a success probability, which will be used to give a refined estimate �Z. We +require the following corollary of Proposition 2, whose proof we defer to Appendix B. +Corollary 2. Following notation of Proposition 2, let R := +�Z +Z . There is a quantum oracle Otest +which can be implemented under T Update calls to x in O(T log m) time, and has query cost +O +�� +ρC · β log4 +�Cmn +ℓδ +�� +. +Furthermore, for explicitly known constants Cℓ and Cu, Otest returns “success” with probability p for +Cℓ +√Rρ ≤ p ≤ +Cu +√Rρ. +Corollary 2 differs from Proposition 2 in that it returns a Boolean-valued answer (as opposed to +a sample from an approximate Gibbs distribution), and has a success probability parameterized by +explicit constants. We now show how to use Corollary 2 to maintain Invariant 1. +13 + +Lemma 4. 
Assume Invariants 1, 2 hold for the phase consisting of iterations τ + s, s ∈ [⌈ 1 +η⌉], and +suppose C ≥ 4C2 +u +C2 +ℓ +for C = O(1), where Cu and Cℓ are the constants from Corollary 2. Further, +suppose we have obtained q satisfying the conclusion of Lemma 3 (i.e. that the algorithm in Lemma 3 +succeeded). We can determine �Z such that �Z ∈ [Zτ ′, CZτ ′] with probability ≥ 1 − +α +2⌈ηT⌉, in time +O +��n +k · Tη log4 �mn +δ +� +log +�ηT +α +�� +. +Proof. Define �Z0 := 3 �Zprev, R0 := +�Z0 +Zτ′ , and note that �Z0 ∈ [Zτ ′, 9CZτ ′] by Invariant 1 and Lemma 2. +Next, assuming the success of Lemma 3, we have that the success probability p of Otest from +Corollary 2 using the estimate �Z0 satisfies (for the unknown R0 ∈ [1, 9C], and known Cℓ, Cu, ρ) +Cℓ +√R0ρ ≤ p ≤ +Cu +√R0ρ. +For N := 27 log 4⌈ηT⌉ +α +· 3√Cρ +Cℓ , we first run Otest N times and check the number of successes, denoted +by S, which fits within the runtime budget by Corollary 2. By a Chernoff bound, we have that with +probability ≥ 1 − +α +2⌈ηT⌉, we have +54 log 4⌈ηT⌉ +α +· +� +C +R0 +≤ 2 +3pN ≤ S ≤ 4 +3pN ≤ 108 log 4⌈ηT⌉ +α +· Cu +Cℓ +· +� +C +R0 +. +Hence, we can determine the quantity R0 up to a multiplicative factor of 4C2 +u +C2 +ℓ +≤ C, which also +implies the same multiplicative approximation factor for Zτ ′, as desired. +4.3 +Proof of Theorem 3 +Theorem 3. Let α ∈ (0, 1) and δ ≤ η. Given a quantum oracle for A ∈ Rm×n (defined in Section 2) +with ∥A∥max ≤ 1, we can solve Problem 1 with probability ≥ 1 − α with +max(Tsamp, Tupdate) = O +� +1 + √n · Tη log4 �mn +δ +� +· +�� +η log +�nηT +α +� ++ η log +�nηT +α +��� +, +and an additive initialization cost of +O +� +η3T 3 log4 +�nηT +δ +� ++ log7 +�nηT +δ +�� +. +Proof. We first claim that for any k ∈ [n], we can solve Problem 1 with probability ≥ 1 − α and +Tsamp = O +��n +k · Tη log4 �mn +δ +�� +, +Tupdate = O +���n +k · Tη log4 �mn +δ +�� +· kη log +�nηT +α +�� +. +This follows from combining Lemma 3 (amortized over ⌈ 1 +η⌉ iterations), Corollary 1, and Lemma 4, +and taking a union bound over at most ⌈ηT⌉ phases. Here we note that the cost of log m per +iteration to support Update costs to x in Lemma 1, Proposition 2, and Corollary 2 is not dominant. +By choosing k = Θ(max(1, (η log mn +αǫ )−1)), we balance the costs of Tsamp and Tupdate, yielding the +conclusion. We finally note that by picking an appropriate constant in the definition of k, we have +δ ≤ η =⇒ δ ≤ +1 +16k as required by Lemma 3, the only component specifying a bound on δ. +14 + +Acknowledgments +We thank András Gilyén for communication regarding the prior work [vAG19]. AB was supported +in part by the DOE QuantISED grant DE-SC0020360, by the AFOSR under grant FA9550-21- +1-0392, and by the U.S. DOE Office of Science under Award Number DE-SC0020266. +YG was +supported in part by the Stanford MS&E DE&I Research program. YJ was supported in part by a +Stanford Graduate Fellowship and a Danzig-Lieberman Graduate Fellowship. AS was supported in +part by a Microsoft Research Faculty Fellowship, NSF CAREER Award CCF1844855, NSF Grant +CCF-1955039, a PayPal research award, and a Sloan Research Fellowship. KT thanks Ewin Tang +for her expertise on quantum linear algebra and for fielding many of our questions. +References +[Aar15] +Scott Aaronson. Read the fine print. Nature Physics, 11(4):291–293, 2015. +[AK07] +Sanjeev Arora and Satyen Kale. A combinatorial, primal-dual approach to semidefi- +nite programs. In Proceedings of the thirty-ninth annual ACM symposium on Theory +of computing, pages 227–236, 2007. 
+[AL17] +Zeyuan Allen-Zhu and Yuanzhi Li. Follow the compressed leader: Faster online +learning of eigenvectors and faster MMWU. In Doina Precup and Yee Whye Teh, +editors, Proceedings of the 34th International Conference on Machine Learning, +ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings +of Machine Learning Research, pages 116–125. PMLR, 2017. +[Alm21] +Josh Alman. Limits on the universal method for matrix multiplication. Theory +Comput., 17:1–30, 2021. +[AW21] +Josh Alman and Virginia Vassilevska Williams. A refined laser method and faster +matrix multiplication. In Dániel Marx, editor, Proceedings of the 2021 ACM-SIAM +Symposium on Discrete Algorithms, SODA 2021, Virtual Conference, January 10 +- 13, 2021, pages 522–539. SIAM, 2021. +[BHMT02] +Gilles Brassard, Peter Høyer, Michele Mosca, and Alain Tapp. Quantum amplitude +amplification and estimation. Quantum Computation and Quantum Information, +305:53–74, 2002. +[BS17] +Fernando GSL Brandao and Krysta M Svore. +Quantum speed-ups for solving +semidefinite programs. +In 2017 IEEE 58th Annual Symposium on Foundations +of Computer Science (FOCS), pages 415–426. IEEE, 2017. +[Bub15] +Sébastien Bubeck. Convex optimization: Algorithms and complexity. Foundations +and Trends in Machine Learning, 8(3-4):231–357, 2015. +[BWP+17] +Jacob Biamonte, Peter Wittek, Nicola Pancotti, Patrick Rebentrost, Nathan Wiebe, +and Seth Lloyd. Quantum machine learning. Nature, 549(7671):195–202, 2017. +[CCLW20] +Shouvanik Chakrabarti, Andrew M Childs, Tongyang Li, and Xiaodi Wu. Quantum +algorithms and lower bounds for convex optimization. Quantum, 4:221, 2020. +15 + +[CDST19] +Yair Carmon, John C. Duchi, Aaron Sidford, and Kevin Tian. A rank-1 sketch +for matrix multiplicative weights. In Alina Beygelzimer and Daniel Hsu, editors, +Conference on Learning Theory, COLT 2019, 25-28 June 2019, Phoenix, AZ, USA, +volume 99 of Proceedings of Machine Learning Research, pages 589–623. PMLR, +2019. +[CGL+20] +Nai-Hui Chia, András Gilyén, Tongyang Li, Han-Hsuan Lin, Ewin Tang, and Chun- +hao Wang. Sampling-based sublinear low-rank matrix arithmetic framework for +dequantizing quantum machine learning. In Proceedings of the 52nd Annual ACM +SIGACT symposium on theory of computing, pages 387–400, 2020. +[CJST19] +Yair Carmon, Yujia Jin, Aaron Sidford, and Kevin Tian. Variance reduction for +matrix games. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence +d’Alché-Buc, Emily B. Fox, and Roman Garnett, editors, Advances in Neural Infor- +mation Processing Systems 32: Annual Conference on Neural Information Process- +ing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, +pages 11377–11388, 2019. +[CJST20] +Yair Carmon, Yujia Jin, Aaron Sidford, and Kevin Tian. Coordinate methods for +matrix games. In Sandy Irani, editor, 61st IEEE Annual Symposium on Foundations +of Computer Science, FOCS 2020, Durham, NC, USA, November 16-19, 2020, +pages 283–293. IEEE, 2020. +[CLS21] +Michael B Cohen, Yin Tat Lee, and Zhao Song. Solving linear programs in the +current matrix multiplication time. Journal of the ACM (JACM), 68(1):1–39, 2021. +[DH96] +Christoph Dürr and Peter Høyer. A quantum algorithm for finding the minimum. +CoRR, quant-ph/9607014, 1996. +[DNV20] +Daniel Dadush, Bento Natura, and Làszlò A Vègh. Revisiting tardos’s framework +for linear programming: faster exact solutions using approximate solvers. 
In Sandy +Irani, editor, 61st IEEE Annual Symposium on Foundations of Computer Science, +FOCS 2020, Durham, NC, USA, November 16-19, 2020, pages 931–942. IEEE, +2020. +[GAW19] +András Gilyén, Srinivasan Arunachalam, and Nathan Wiebe. Optimizing quantum +optimization algorithms via faster quantum gradient computation. In Proceedings of +the Thirtieth Annual ACM-SIAM Symposium on Discrete Algorithms, pages 1425– +1444. SIAM, 2019. +[GK95] +Michael D. Grigoriadis and Leonid G. Khachiyan. A sublinear-time randomized +approximation algorithm for matrix games. Operation Research Letters, 18(2):53– +58, 1995. +[GLG22] +Sevag Gharibian and François Le Gall. Dequantizing the quantum singular value +transformation: Hardness and applications to quantum chemistry and the quantum +pcp conjecture. In Proceedings of the 54th Annual ACM SIGACT Symposium on +Theory of Computing, pages 19–32, 2022. +[GR02] +Lov Grover and Terry Rudolph. Creating superpositions that correspond to effi- +ciently integrable probability distributions. CoRR, abs/quant-ph/0208112, 2002. +16 + +[GSLW19] +András Gilyén, Yuan Su, Guang Hao Low, and Nathan Wiebe. Quantum singular +value transformation and beyond: exponential improvements for quantum matrix +arithmetics. In Moses Charikar and Edith Cohen, editors, Proceedings of the 51st +Annual ACM SIGACT Symposium on Theory of Computing, STOC 2019, Phoenix, +AZ, USA, June 23-26, 2019, pages 193–204. ACM, 2019. +[Haa19] +Jeongwan Haah. Product decomposition of periodic functions in quantum signal +processing. Quantum, 3:190, 2019. +[HHL09] +Aram W Harrow, Avinatan Hassidim, and Seth Lloyd. +Quantum algorithm for +linear systems of equations. Physical review letters, 103(15):150502, 2009. +[Jor05] +Stephen P Jordan. +Fast quantum algorithm for numerical gradient estimation. +Physical review letters, 95(5):050501, 2005. +[JSWZ21] +Shunhua Jiang, Zhao Song, Omri Weinstein, and Hengjie Zhang. A faster algorithm +for solving general lps. In Proceedings of the 53rd Annual ACM SIGACT Symposium +on Theory of Computing, STOC 2021, 2021, pages 823–832, 2021. +[Kar84] +Narendra Karmarkar. A new polynomial-time algorithm for linear programming. +In Proceedings of the sixteenth annual ACM symposium on Theory of computing, +pages 302–311, 1984. +[KP16] +Iordanis Kerenidis and Anupam Prakash. +Quantum recommendation systems. +arXiv preprint arXiv:1603.08675, 2016. +[KP20] +Iordanis Kerenidis and Anupam Prakash. A quantum interior point method for lps +and sdps. ACM Transactions on Quantum Computing, 1(1):1–32, 2020. +[LCW19] +Tongyang Li, Shouvanik Chakrabarti, and Xiaodi Wu. Sublinear quantum algo- +rithms for training linear and kernel-based classifiers. In International Conference +on Machine Learning, pages 3815–3824. PMLR, 2019. +[LMR14] +Seth Lloyd, Masoud Mohseni, and Patrick Rebentrost. Quantum principal compo- +nent analysis. Nature Physics, 10(9):631–633, 2014. +[LS14] +Yin Tat Lee and Aaron Sidford. Path finding methods for linear programming: +Solving linear programs in o (vrank) iterations and faster algorithms for maximum +flow. In 2014 IEEE 55th Annual Symposium on Foundations of Computer Science, +pages 424–433. IEEE, 2014. +[LS19] +Yin Tat Lee and Aaron Sidford. Solving linear programs with sqrt (rank) linear +system solves. arXiv preprint arXiv:1910.08033, 2019. +[LWCW21] +Tongyang Li, Chunhao Wang, Shouvanik Chakrabarti, and Xiaodi Wu. Sublinear +classical and quantum algorithms for general matrix games. 
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 8465–8473, 2021.

[Nem04] Arkadi Nemirovski. Prox-method with rate of convergence O(1/t) for variational inequalities with Lipschitz continuous monotone operators and smooth convex-concave saddle point problems. SIAM Journal on Optimization, 15(1):229–251, 2004.

[Nes07] Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2-3):319–344, 2007.

[Neu28] John von Neumann. Zur Theorie der Gesellschaftsspiele. Mathematische Annalen, 100:295–320, 1928.

[NJLS09] Arkadi Nemirovski, Anatoli B. Juditsky, Guanghui Lan, and Alexander Shapiro. Robust stochastic approximation approach to stochastic programming. SIAM Journal on Optimization, 19(4):1574–1609, 2009.

[Ren88] James Renegar. A polynomial-time algorithm, based on Newton's method, for linear programming. Mathematical Programming, 40(1):59–93, 1988.

[RML14] Patrick Rebentrost, Masoud Mohseni, and Seth Lloyd. Quantum support vector machine for big data classification. Physical Review Letters, 113(13):130503, 2014.

[SV14] Sushant Sachdeva and Nisheeth K. Vishnoi. Faster algorithms via approximation theory. Foundations and Trends in Theoretical Computer Science, 9(2):125–210, 2014.

[Tan19] Ewin Tang. A quantum-inspired classical algorithm for recommendation systems. In Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing (STOC 2019), pages 217–228, 2019.

[vAG19] Joran van Apeldoorn and András Gilyén. Quantum algorithms for zero-sum games. CoRR, abs/1904.03180, 2019.

[vAGGdW20a] Joran van Apeldoorn, András Gilyén, Sander Gribling, and Ronald de Wolf. Convex optimization using quantum oracles. Quantum, 4:220, 2020.

[VAGGdW20b] Joran van Apeldoorn, András Gilyén, Sander Gribling, and Ronald de Wolf. Quantum SDP-solvers: Better upper and lower bounds. Quantum, 4:230, 2020.

[vdB20] Jan van den Brand. A deterministic linear program solver in current matrix multiplication time. In Proceedings of the Thirty-First Annual ACM-SIAM Symposium on Discrete Algorithms (SODA 2020), pages 259–278, 2020.

[vdBLL+21] Jan van den Brand, Yin Tat Lee, Yang P. Liu, Thatchaphol Saranurak, Aaron Sidford, Zhao Song, and Di Wang. Minimum cost flows, MDPs, and ℓ1-regression in nearly linear time for dense instances. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing (STOC 2021), pages 859–869, 2021.

[vdBLSS20] Jan van den Brand, Yin Tat Lee, Aaron Sidford, and Zhao Song. Solving tall dense linear programs in nearly linear time. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing (STOC 2020), pages 775–788, 2020.

A Solving matrix games with a Gibbs sampling oracle

In this section, we prove Proposition 1, which shows how to solve a zero-sum matrix game using an approximate Gibbs sampling oracle (via Algorithm 1). To briefly motivate the algorithm we use and our proof of its guarantees, we recall that the problem we consider is of the form

$$\min_{v \in \Delta^n}\max_{u \in \Delta^m} f(u, v) := u^\top A v, \qquad \text{where } \|A\|_{\max} \le 1, \tag{5}$$

and we define the associated gradient operator as

$$g(u, v) = (-Av, A^\top u). \tag{6}$$

Taking (stochastic) mirror descent steps on the gradient operator (6) associated with (5) is well-known to yield an approximate NE to the matrix game [Bub15]. We show that an approximate implementation of this strategy, combined with appropriate subsampling, efficiently yields an approximate NE (see the illustrative sketch below).
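For intuition, the classical loop underlying Algorithm 1 can be simulated directly. The sketch below is not part of the paper's analysis: exact Gibbs sampling stands in for the δ-approximate quantum oracles, the payoff matrix is a small random instance, and the parameter constants are chosen for readability rather than to match Proposition 1.

```python
# Illustrative classical simulation of the sampling-based mirror descent loop that
# Algorithm 1 accelerates. Exact Gibbs sampling replaces the delta-approximate oracles.
import numpy as np

rng = np.random.default_rng(0)

def gibbs(v):
    """Exact Gibbs distribution proportional to exp(v), computed stably."""
    w = np.exp(v - v.max())
    return w / w.sum()

def duality_gap(A, u, v):
    """max_{u'} u'^T A v  -  min_{v'} u^T A v'  over the simplices."""
    return (A @ v).max() - (A.T @ u).min()

m, n = 40, 60
A = rng.uniform(-1.0, 1.0, size=(m, n))      # payoff matrix with ||A||_max <= 1

eps = 0.2
eta = eps / 15                                # step size as in Proposition 3 (illustrative)
T = int(np.ceil(6 * np.log(m * n) / (eta * eps)))

x, y = np.zeros(m), np.zeros(n)               # u_t ∝ exp(A y_t), v_t ∝ exp(-A^T x_t)
u_bar, v_bar = np.zeros(m), np.zeros(n)

for _ in range(T):
    u, v = gibbs(A @ y), gibbs(-A.T @ x)
    u_bar += u / T
    v_bar += v / T
    i = rng.choice(m, p=u)                    # sampled row (max player's coordinate)
    j = rng.choice(n, p=v)                    # sampled column (min player's coordinate)
    x[i] += eta                               # sparse updates, matching Problem 1
    y[j] += eta

print(f"T = {T}, duality gap of averaged iterates: {duality_gap(A, u_bar, v_bar):.4f}")
```

On typical runs the printed gap is on the order of eps, consistent with (but not a substitute for) the guarantee proved below.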
We begin by making the following observation.

Lemma 5. Let $u, \tilde{u} \in \Delta^m$ have $\|u - \tilde{u}\|_1 \le \delta$. Let $\tilde{g} := A_{i:}$ where $i \sim \tilde{u}$, and $g := A^\top u$. Then $\|g - \mathbb{E}\tilde{g}\|_\infty \le \delta$.

Proof. Note that $\mathbb{E}\tilde{g} = A^\top \tilde{u}$, and $\|A^\top(u - \tilde{u})\|_\infty \le \|u - \tilde{u}\|_1 \le \delta$ since $\|A\|_{\max} \le 1$.

We next present a variant of the classical mirror descent analysis, which bounds the expected approximation quality of iterates of Algorithm 1 prior to subsampling.

Proposition 3. Let $\delta \le \frac{\epsilon}{20}$, $\eta = \frac{\epsilon}{15}$ and $T \ge \frac{6\log(mn)}{\eta\epsilon}$ in Algorithm 1. Let the iterates of Algorithm 1 be $\{x_t, y_t\}_{t=0}^{T-1}$, and denote $u_t := \frac{\exp(Ay_t)}{\|\exp(Ay_t)\|_1}$ and $v_t := \frac{\exp(-A^\top x_t)}{\|\exp(-A^\top x_t)\|_1}$ for all $0 \le t < T$. For $(\bar{u}, \bar{v}) := \frac{1}{T}\sum_{t=0}^{T-1}(u_t, v_t)$, we have

$$\mathbb{E}\left[\max_{u \in \Delta^m} u^\top A\bar{v} - \min_{v \in \Delta^n} \bar{u}^\top A v\right] \le \epsilon. \tag{7}$$

Proof. By definition of the updates, at every iteration $0 \le t \le T - 1$ we have

$$u_{t+1} = \operatorname*{argmin}_{u \in \Delta^m}\left\{\eta\langle -A_{:j_t}, u\rangle + \sum_{i \in [m]} [u]_i \log\frac{[u]_i}{[u_t]_i}\right\},
\qquad
v_{t+1} = \operatorname*{argmin}_{v \in \Delta^n}\left\{\eta\langle A_{i_t:}, v\rangle + \sum_{j \in [n]} [v]_j \log\frac{[v]_j}{[v_t]_j}\right\}.$$

Consequently, by the optimality conditions of $u_{t+1}$ and $v_{t+1}$ respectively, we have for any $u \in \Delta^m$, $v \in \Delta^n$, letting $V_x(x') := \sum_k [x']_k \log\frac{[x']_k}{[x]_k}$ denote the KL divergence between simplex variables of appropriate dimension,

$$\begin{aligned}
\langle -A_{:j}, u_t - u\rangle + \langle A_{i:}, v_t - v\rangle
&\le \frac{1}{\eta}\left(V_{u_t}(u) - V_{u_{t+1}}(u) + V_{v_t}(v) - V_{v_{t+1}}(v)\right) \\
&\quad + \left(\langle -A_{:j}, u_t - u_{t+1}\rangle - \frac{1}{\eta}V_{u_t}(u_{t+1})\right)
      + \left(\langle A_{i:}, v_t - v_{t+1}\rangle - \frac{1}{\eta}V_{v_t}(v_{t+1})\right) \\
&\le \frac{1}{\eta}\left(V_{u_t}(u) - V_{u_{t+1}}(u) + V_{v_t}(v) - V_{v_{t+1}}(v)\right)
      + \frac{\eta}{2}\|A_{:j}\|_\infty^2 + \frac{\eta}{2}\|A_{i:}\|_\infty^2,
\end{aligned} \tag{8}$$

where for the last inequality we use Hölder's inequality and the fact that $V$ is 1-strongly convex in the $\ell_1$ norm (by Pinsker's inequality). Averaging the above for $0 \le t < T$, and denoting $w_t := (u_t, v_t)$ and $\tilde{g}_t := (-A_{:j_t}, A_{i_t:})$, we obtain for any $w = (u, v) \in \Delta^m \times \Delta^n$,

$$\frac{1}{T}\sum_{t=0}^{T-1}\langle \tilde{g}_t, w_t - w\rangle \le \frac{1}{\eta T}\left(V_{u_0}(u) + V_{v_0}(v)\right) + \eta. \tag{9}$$

In the above, we further recalled the bound $\|A\|_{\max} \le 1$ by assumption. In order to bound the deviation of the left-hand side from its expectation, we use a "ghost iterate" argument following [NJLS09, CJST19]. In particular, we define iterates $\tilde{u}_t, \tilde{v}_t$ as follows: let $\tilde{u}_0 \leftarrow u_0$, $\tilde{v}_0 \leftarrow v_0$, and then for each $0 \le t < T$ define

$$\tilde{u}_{t+1} := \operatorname*{argmin}_{u \in \Delta^m}\left\{\eta\langle -Av_t + A_{:j_t}, u\rangle + \sum_{i \in [m]} [u]_i \log\frac{[u]_i}{[\tilde{u}_t]_i}\right\},
\qquad
\tilde{v}_{t+1} := \operatorname*{argmin}_{v \in \Delta^n}\left\{\eta\langle A^\top u_t - A_{i_t:}, v\rangle + \sum_{j \in [n]} [v]_j \log\frac{[v]_j}{[\tilde{v}_t]_j}\right\},$$

where $i_t, j_t$ above are the same coordinates as were used in defining the updates to $u_{t+1}$ and $v_{t+1}$. By an analogous bound to (8), where we note $\|A_{:j_t} - Av_t\|_\infty, \|A^\top u_t - A_{i_t:}\|_\infty \le 2$,

$$\langle -Av_t + A_{:j_t}, \tilde{u}_t - u\rangle + \langle A^\top u_t - A_{i_t:}, \tilde{v}_t - v\rangle \le \frac{1}{\eta}\left(V_{\tilde{u}_t}(u) - V_{\tilde{u}_{t+1}}(u) + V_{\tilde{v}_t}(v) - V_{\tilde{v}_{t+1}}(v)\right) + 4\eta.$$

Averaging the above for $0 \le t < T$, and denoting $\tilde{w}_t := (\tilde{u}_t, \tilde{v}_t)$ and $g_t := g(w_t)$ (see (6)), we obtain for any $w = (u, v) \in \Delta^m \times \Delta^n$,

$$\frac{1}{T}\sum_{t=0}^{T-1}\langle g_t - \tilde{g}_t, \tilde{w}_t - w\rangle \le \frac{1}{\eta T}\left(V_{u_0}(u) + V_{v_0}(v)\right) + 4\eta. \tag{10}$$

Summing inequalities (9) and (10), and maximizing over $w = (u, v) \in \Delta^m \times \Delta^n$, we have

$$\max_{w \in \Delta^m \times \Delta^n} \frac{1}{T}\sum_{t=0}^{T-1}\langle g_t, w_t - w\rangle \le \max_{u \in \Delta^m, v \in \Delta^n} \frac{2}{\eta T}\left(V_{u_0}(u) + V_{v_0}(v)\right) + 5\eta + \frac{1}{T}\sum_{t=0}^{T-1}\langle g_t - \tilde{g}_t, w_t - \tilde{w}_t\rangle. \tag{11}$$

Taking expectations over the above, we have

$$\begin{aligned}
\mathbb{E}\left[\max_{w \in \Delta^m \times \Delta^n} \frac{1}{T}\sum_{t=0}^{T-1}\langle g_t, w_t - w\rangle\right]
&\le \max_{u \in \Delta^m, v \in \Delta^n} \frac{2}{\eta T}\left[V_{u_0}(u) + V_{v_0}(v)\right] + 5\eta + \mathbb{E}\left[\frac{1}{T}\sum_{t=0}^{T-1}\langle g_t - \tilde{g}_t, w_t - \tilde{w}_t\rangle\right] \\
&\overset{(i)}{\le} \frac{2\log(mn)}{\eta T} + 5\eta + \frac{1}{T}\sum_{t=0}^{T-1}\langle g_t - \mathbb{E}\tilde{g}_t, w_t - \tilde{w}_t\rangle
\overset{(ii)}{\le} \frac{2\log(mn)}{\eta T} + 5\eta + 4\delta
\overset{(iii)}{\le} \epsilon.
\end{aligned}$$

In the above, (i) used the diameter bound of the KL divergence from the uniform distribution, i.e. $\max_{u \in \Delta^m} V_{u_0}(u) = \log m$ (and a similar bound for $V_{v_0}(v)$); (ii) uses that $\tilde{g}_t$ is conditionally independent of $w_t$ and $\tilde{w}_t$, the assumption on the Gibbs sampler giving $\|g_t - \mathbb{E}\tilde{g}_t\|_\infty \le \delta$ (via Lemma 5), and Hölder's inequality; and (iii) uses our choices of $T$, $\eta$ and $\delta$. Finally, we note that the desired claim follows by linearity: for any $w = (u, v)$,

$$\frac{1}{T}\sum_{t=0}^{T-1}\langle g_t, w_t - w\rangle = \left\langle g\left(\frac{1}{T}\sum_{t=0}^{T-1} w_t\right), \frac{1}{T}\sum_{t=0}^{T-1} w_t - w\right\rangle = u^\top A\bar{v} - \bar{u}^\top A v.$$
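The first display in the preceding proof uses the standard closed form of the entropic (KL-regularized) proximal step over the simplex: the minimizer of $\eta\langle g, u\rangle + V_{u_t}(u)$ is $u_t$ reweighted by $\exp(-\eta g)$ and renormalized. The following numerical sanity check is illustrative only; the helper names are not from the paper.

```python
# Check (numerically) that argmin_u eta*<g, u> + KL(u || u_t) over the simplex equals
# u_t * exp(-eta*g) / Z, by comparing its objective value against random feasible points.
import numpy as np

rng = np.random.default_rng(1)
n, eta = 25, 0.3
u_t = rng.dirichlet(np.ones(n))
g = rng.uniform(-1.0, 1.0, size=n)           # a gradient column, e.g. -A[:, j_t]

def objective(u):
    return eta * g @ u + np.sum(u * np.log(u / u_t))

u_closed = u_t * np.exp(-eta * g)
u_closed /= u_closed.sum()

best_random = min(objective(rng.dirichlet(np.ones(n))) for _ in range(20000))
print(f"closed-form objective : {objective(u_closed):.6f}")
print(f"best of 20000 random  : {best_random:.6f}")    # should be >= the closed-form value
```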
By using a simple martingale argument (inspired by those in [AL17, CDST19]) to bound the error term in (11), we show that the guarantee of Proposition 3 holds with high probability.

Corollary 3. Let $\alpha \in (0, 1)$, and let $\delta \le \frac{\epsilon}{20}$, $\eta = \frac{\epsilon}{20}$ and $T \ge \frac{8\log(mn)}{\eta\epsilon} + \frac{2048\log\frac{1}{\alpha}}{\epsilon^2}$ in Algorithm 1. Then with probability at least $1 - \alpha$, following the notation of Proposition 3, $(\bar{u}, \bar{v})$ is an $\epsilon$-approximate NE for $A$.

Proof. Consider the filtration given by $\mathcal{F}_t = \sigma(u_0, v_0, \tilde{g}_0, \cdots, \tilde{g}_t, u_{t+1}, v_{t+1})$. We will bound the term $\sum_{t=0}^{T-1}\langle g_t - \tilde{g}_t, w_t - \tilde{w}_t\rangle$ in (11). To do so, we define a martingale difference sequence of the form $D_t := \langle g_t - \tilde{g}_t, w_t - \tilde{w}_t\rangle - \langle g_t - \mathbb{E}[\tilde{g}_t \mid \mathcal{F}_{t-1}], w_t - \tilde{w}_t\rangle$, which is adapted to the filtration $\mathcal{F}_t$. We first note that $D_t \le \|g_{t-1} - \tilde{g}_{t-1}\|_\infty \|w_{t-1} - \tilde{w}_{t-1}\|_1 \le 8$ with probability 1. Consequently, applying the Azuma-Hoeffding inequality yields

$$\sum_{t=0}^{T-1} D_t \le \sqrt{128\, T \log\frac{1}{\alpha}} \quad \text{with probability} \ge 1 - \alpha.$$

Plugging this back into (11) and using the KL divergence range bound, Lemma 5 with our definition of $\mathcal{O}_{\mathrm{gibbs}}$, and our choices of parameters, we thus have with probability $1 - \alpha$,

$$\max_{w \in \Delta^m \times \Delta^n} \frac{1}{T}\sum_{t=0}^{T-1}\langle g_t, w_t - w\rangle \le \frac{2\log(mn)}{\eta T} + 5\eta + 4\delta + \sqrt{\frac{128\log\frac{1}{\alpha}}{T}} \le \epsilon. \tag{12}$$

The remainder of the proof follows analogously to Proposition 3.

The Gibbs sampling oracles implicitly maintain access to $u_t \propto \exp(Ay_t)$ and $v_t \propto \exp(-A^\top x_t)$, which by averaging gives $(\bar{u}, \bar{v}) = \frac{1}{T}\sum_{t=0}^{T-1}(u_t, v_t)$ as one approximate equilibrium as guaranteed in Corollary 3. To turn the implicitly maintained iterates into an actual classical output, we subsample the iterates. Below we formally show that one can take the empirical average of independent samples from distributions close to $\bar{u}$ and $\bar{v}$ to also obtain an approximate equilibrium (with the same approximation factor up to constant factors) with high probability.

Lemma 6. Suppose $\bar{u} = \frac{1}{T}\sum_{t=0}^{T-1} u_t$ for $\{u_t\}_{t=0}^{T-1} \subset \Delta^m$ and $\bar{v} = \frac{1}{T}\sum_{t=0}^{T-1} v_t$ for $\{v_t\}_{t=0}^{T-1} \subset \Delta^n$ are an $\epsilon$-approximate NE for $A$. Further suppose that for some $\delta \in (0, 1)$, $\{\tilde{u}_t\}_{t=0}^{T-1} \subset \Delta^m$ and $\{\tilde{v}_t\}_{t=0}^{T-1} \subset \Delta^n$ satisfy $\|\tilde{u}_t - u_t\|_1 \le \delta$ and $\|\tilde{v}_t - v_t\|_1 \le \delta$ for all $0 \le t < T$. Let $\hat{u} = \frac{1}{T}\sum_{t=0}^{T-1} e_{i_t}$ where each $e_{i_t} \in \mathbb{R}^m$ is sampled independently according to $\tilde{u}_t$; similarly, let $\hat{v} = \frac{1}{T}\sum_{t=0}^{T-1} e_{j_t}$ where each $e_{j_t} \in \mathbb{R}^n$ is sampled independently according to $\tilde{v}_t$. Suppose $T \ge \frac{16\log\frac{mn}{\alpha}}{\epsilon^2}$. Then with probability at least $1 - \alpha$, $(\hat{u}, \hat{v})$ is a $(2\epsilon + 2\delta)$-approximate NE for $A$.

Proof. First, let $\tilde{u}_{\mathrm{avg}} = \frac{1}{T}\sum_{t=0}^{T-1}\tilde{u}_t$ and $\tilde{v}_{\mathrm{avg}} = \frac{1}{T}\sum_{t=0}^{T-1}\tilde{v}_t$. By convexity of norms, we have $\|\tilde{u}_{\mathrm{avg}} - \bar{u}\|_1 \le \delta$ and $\|\tilde{v}_{\mathrm{avg}} - \bar{v}\|_1 \le \delta$, and hence under the NE approximation guarantee of $(\bar{u}, \bar{v})$ and Hölder's inequality,

$$\max_{u \in \Delta^m} u^\top A\tilde{v}_{\mathrm{avg}} - \min_{v \in \Delta^n} \tilde{u}_{\mathrm{avg}}^\top A v \le \epsilon + 2\delta.$$

Let $z$ be a fixed vector in $[-1, 1]^n$. By Hoeffding's inequality, since each random variable $\langle z, e_{j_t}\rangle$ lies in the range $[-1, 1]$ and $\mathbb{E}\hat{v} = \tilde{v}_{\mathrm{avg}}$, we have that

$$\Pr\left[|\langle z, \hat{v} - \tilde{v}_{\mathrm{avg}}\rangle| \ge \frac{\epsilon}{2}\right] \le 2\exp\left(-\frac{T\epsilon^2}{8}\right) \le \frac{\alpha}{m + n}. \tag{13}$$

Next, note that $\max_{u \in \Delta^m} u^\top A\tilde{v}_{\mathrm{avg}}$ is achieved by a basis vector $u = e_i$. Hence, applying a union bound over (13) for all $z = A_{i:}$ shows that with probability at least $1 - \frac{\alpha m}{m+n}$,

$$\max_{u \in \Delta^m} u^\top A\hat{v} \le \max_{u \in \Delta^m} u^\top A\tilde{v}_{\mathrm{avg}} + \frac{\epsilon}{2}.$$

By symmetry, with probability at least $1 - \frac{\alpha n}{m+n}$,

$$\min_{v \in \Delta^n} \hat{u}^\top A v \ge \min_{v \in \Delta^n} \tilde{u}_{\mathrm{avg}}^\top A v - \frac{\epsilon}{2}.$$

The conclusion follows from a union bound and combining the above three displays.
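Lemma 6 is the rounding step that converts the implicitly maintained averages into an explicit classical output. The following sketch is illustrative only: it uses arbitrary stand-in distributions (not iterates produced by Algorithm 1) and exact sampling, purely to exercise the concentration argument.

```python
# Illustration of the subsampling step: averaging one sampled basis vector per iterate
# yields nearly the same equilibrium gap as the exact average of the distributions.
import numpy as np

rng = np.random.default_rng(2)
m, n, T = 50, 70, 4000
A = rng.uniform(-1.0, 1.0, size=(m, n))

def gap(A, u, v):
    return (A @ v).max() - (A.T @ u).min()

us = rng.dirichlet(np.ones(m), size=T)        # stand-ins for the per-iterate u_t
vs = rng.dirichlet(np.ones(n), size=T)        # stand-ins for the per-iterate v_t
u_bar, v_bar = us.mean(axis=0), vs.mean(axis=0)

# Empirical averages of sampled basis vectors, as in Lemma 6.
u_hat = np.bincount([rng.choice(m, p=u) for u in us], minlength=m) / T
v_hat = np.bincount([rng.choice(n, p=v) for v in vs], minlength=n) / T

print(f"gap of exact averages     : {gap(A, u_bar, v_bar):.4f}")
print(f"gap of subsampled averages: {gap(A, u_hat, v_hat):.4f}")
```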
Finally, we put these pieces together to give a complete guarantee.

Proposition 1. Let $A \in \mathbb{R}^{m \times n}$ satisfy $\|A\|_{\max} \le 1$ and $\epsilon, \alpha \in (0, 1)$. Let $\delta \le \frac{\epsilon}{20}$, $\eta = \frac{\epsilon}{60}$, and $T = \Theta(\epsilon^{-2}\log\frac{mn}{\alpha})$ for an appropriate constant. With probability $\ge 1 - \alpha$, Algorithm 1 outputs an $\epsilon$-approximate NE for $A$.

Proof. We follow the notation of Proposition 3. By applying Corollary 3 (up to constant factors), we have that with probability at least $1 - \frac{\alpha}{2}$, $\bar{u} := \frac{1}{T}\sum_{t=0}^{T-1} u_t$ and $\bar{v} := \frac{1}{T}\sum_{t=0}^{T-1} v_t$ satisfy

$$\max_{u \in \Delta^m} u^\top A\bar{v} - \min_{v \in \Delta^n} \bar{u}^\top A v \le \frac{\epsilon}{3}.$$

Finally, Lemma 6 (with failure probability $\frac{\alpha}{2}$) and a union bound yield the desired conclusion.

B Quantum rejection sampling with a hint

In this section, we prove Proposition 2, which gives a dynamic quantum rejection sampling subroutine and bounds its cost of implementation. Our result is an extension of analogous developments in [vAG19], but is stated more generally to allow for the use of an appropriate "hint" vector in the rejection sampling procedure. We build up to our main result in several pieces.

Amplitude amplification. First, for a quantum decision algorithm which applies a unitary U and then measures, yielding an accepting state with probability α, quantum amplitude amplification [BHMT02] shows we can apply U roughly $\alpha^{-\frac{1}{2}}$ times to obtain an accepting state with high probability.

Proposition 4 (Theorem 3, [BHMT02]). Let $S \subseteq \{0, 1\}^s$, let $U$ be an $s$-qubit quantum oracle, and let $\alpha$ be the probability that measuring the result of applying $U$ yields an accepting state. There is a (quantum) algorithm using $O(\alpha^{-\frac{1}{2}}\log\frac{1}{\delta})$ queries to $U$ and $O(\log s \log\frac{1}{\delta})$ additional time that returns some $s \in S$ with probability $\ge 1 - \delta$.

Loading from trees. Given a dynamic vector $x \in \mathbb{R}^m_{\ge 0}$ which is supported in an appropriate efficient data structure SamplerTree (see Lemma 1), and a known bound $\beta \ge \|x\|_1$, we recall a result of [GR02] which allows us to form a superposition of the entries of $x$ (suitably rescaled).

Lemma 7. Let $x \in \mathbb{R}^m_{\ge 0}$ correspond to an instance of SamplerTree, and $\beta \ge \|x\|_1$. We can maintain a quantum oracle $\mathcal{O}_{\mathrm{SamplerTree}}$ which takes $O(\log m)$ time to apply, such that the total cost of building $\mathcal{O}_{\mathrm{SamplerTree}}$ after $T$ calls to Update is $O(T\log m)$, and

$$\mathcal{O}_{\mathrm{SamplerTree}}|0\rangle^{\otimes(a+1)} = \sum_{i \in [m]} \sqrt{\frac{x_i}{\beta}}\,|0\rangle|i\rangle + \sqrt{1 - \frac{\|x\|_1}{\beta}}\,|1\rangle|g\rangle.$$

Proof. This is implicit in [GR02]. We first apply a 1-qubit gate to condition on selecting from the tree (with probability $\frac{\|x\|_1}{\beta}$), and then apply the [GR02] procedure conditioned on the first qubit being $|0\rangle$, which controls for one qubit at a time while propagating subtree sums (provided by SamplerTree via SubtreeSum). The cost to build the circuit follows because on an Update we need to change the gates corresponding to the relevant leaf-to-root path.

Corollary 4. Let $x \in \mathbb{R}^m_{\ge 0}$ correspond to an instance of SamplerTree, let $\beta \ge \|x\|_1$, and suppose $A \in \mathbb{R}^{m \times n}$ has $\|A\|_{\max} \le 1$. We can maintain a quantum oracle $\mathcal{O}_{A^\top x}$ which takes $O(\log m)$ time to apply, with total building cost $O(T\log m)$ after $T$ calls to Update, such that for any $j \in [n]$,

$$\mathcal{O}_{A^\top x}|0\rangle^{\otimes(a+2)}|j\rangle = |0\rangle\left(\sum_{i \in [m]} \sqrt{\frac{A_{ij}x_i}{\beta}}\,|0\rangle|i\rangle + |1\rangle|g\rangle\right)|j\rangle.$$

Proof.
We apply O′ +A (see Section 2) to the output of OSamplerTree, ignoring the additional qubit. +We remark here that the additional qubit in Corollary 4 will shortly become useful in constructing +an appropriate block-encoding of a scaling of diag +� +A⊤x +� +. +Polynomial approximation. +In order to give approximate Gibbs samplers for the types of dy- +namic vectors Algorithm 1 encounters, we further require some tools from polynomial approximation +theory. We first state a helper result on boundedly approximating the exponential, a variant of which +was also used in [vAG19]. We provide a proof in Appendix C. +Lemma 8 (Lemma 7, [vAG19]). Let β ≥ 1, ξ ≤ +1 +10. There is a polynomial Pβ,ξ of degree O(β log 1 +ξ ) +such that maxx∈[−1,1] |Pβ,ξ(x)| ≤ 3 and maxx∈[−1,0] |Pβ,ξ(x) − exp(βx)| ≤ ξ. +Next, we state a further corollary of Lemma 8 to be used in our rejection sampler. +Corollary 5. Let B, δ ≥ 0 and suppose v ∈ Rn has ∥v∥∞ ≤ B. Further, suppose for some c ≥ 0, +−c ≤ maxj∈[n] vj ≤ 0. Let q ∈ Rn +≥0 satisfy qj ∈ [ℓ, 1] entrywise. Finally, define uj := vj +2B entrywise. +There is a degree-∆ polynomial P, for ∆ = O(B · (c + log n +ℓδ)), such that for wj := P(uj)2qj and +zj := exp(2Buj)qj entrywise, +���� +w +∥w∥1 +− +z +∥z∥1 +���� +1 +≤ δ. +(14) +Moreover, maxx∈[−1,1] |P(x)| ≤ 1 +2, and ∥w∥1 ≥ 1−δ +36 ∥z∥1. +23 + +Proof. Assume δ ≤ 2 else the statement is clearly true. First, uj ∈ [− 1 +2, 0] entrywise by the stated +assumptions (since vj ∈ [−B, 0] entrywise). Let Pβ,ξ(·) be the polynomial given by Lemma 8 which +ξ-approximates exp(β·) on [− 1 +2, 0]. We define +P(u) := 1 +6PB,ξ (u) , for ξ := +δℓ +6n exp(c). +The degree bound and absolute value bound of this polynomial follows immediately from Lemma 8, +so it remains to show the distance bound. The guarantees of Lemma 8 then imply for all j ∈ [n], +|6P(uj) − exp (Buj)| ≤ ξ. +(15) +We further have that uj ≤ 0, so exp(Buj) ≤ 1. Hence, we also have +|6P(uj) + exp (Buj)| ≤ 2 + ξ ≤ 3. +Combining yields for all j ∈ [n], +��36P(uj)2 − exp (2Buj) +�� ≤ 3ξ. +(16) +Next, let yj := 36wj for all j ∈ [n], and note that +y +∥y∥1 = +w +∥w∥1. We bound +���� +w +∥w∥1 +− +z +∥z∥1 +���� +1 += +� +j∈[n] +���� +yj +∥y∥1 +− +zj +∥z∥1 +���� ≤ +� +j∈[n] +���� +yj +∥y∥1 +− +yj +∥z∥1 +���� + +� +j∈[n] +���� +yj +∥z∥1 +− +zj +∥z∥1 +���� +≤ +����1 − ∥y∥1 +∥z∥1 +���� + ∥y − z∥1 +∥z∥1 +≤ 2 ∥y − z∥1 +∥z∥1 +. +(17) +By using the definitions of y, z and (16), as well as the assumed ranges on q, +∥y − z∥1 ≤ 3nξ, ∥z∥1 ≥ ℓ exp(−c). +The second inequality used that some vj = 2Buj is at least −c by assumption. Combining the above +display with (17) and the definition of ξ concludes the proof of (14). Finally, using the bounds on +∥y − z∥1 , ∥z∥1 above shows that +∥w∥1 = 1 +36∥y∥1 ≥ 1 − δ +36 ∥z∥1. +Block-encoding. +Our approximate Gibbs oracle follows an implementation strategy pioneered by +[GSLW19] termed “block-encoding.” Specifically, we follow [GSLW19] and say that U, an (a + ℓ)- +qubit quantum gate, is an ℓ-bit block-encoding of M if the top-left 2a × 2a submatrix of U is M. +Block-encoded matrices admit efficient composable operations, such as the application of linear +combinations and bounded polynomials. We summarize these properties in the following. +Proposition 5 (Lemma 52, [GSLW19]). Let U1 and U2 be ℓ-bit block-encodings of M1, M2 of the +same size. There is an O(ℓ)-bit block-encoding of 1 +2M1 + 1 +2M2 which takes the same asymptotic +time to apply as applying U1 and U2. +Proposition 6 (Theorem 56, [GSLW19]). 
Let U be an ℓ-bit block-encoding of M, and P : [−1, 1] → +[− 1 +2, 1 +2] be a degree-∆ polynomial. There is an O(ℓ)-bit block-encoding of P(M) which can be applied +in O(∆) applications of U and U† and O(ℓ∆) additional time. +24 + +We also demonstrate that an application of Corollary 4 yields a simple block-encoding of +diag +� +A⊤x +β +� +. A similar construction previously appeared in [vAG19]. +Corollary 6. Let x ∈ Rm +≥0 correspond to an instance of SamplerTree, and β ≥ ∥x∥1. Let M := +diag +� +A⊤x +β +� +and U := O∗ +A⊤x(SWAP12 ⊗ I)OA⊤x, where SWAP12 swaps the first two qubits and +OA⊤x is from Corollary 4. Then U is a block-encoding of M, and can be applied in time O(log m), +with total building cost O(T log m) after T calls to Update. +Proof. Define wij := Aijxi +β +for convenience. By the definition of OA⊤x, we have that +(SWAP12 ⊗ I) OA⊤x +� +|0⟩⊗(a+2)|j⟩ +� += + +|00⟩ +� +i∈[m] +√wij|i⟩ + |10⟩|g⟩ + + |j⟩. +Hence, for j, j′ ∈ [n], we compute ⟨j′|⟨0|⊗(a+2)U|0⟩⊗(a+2)|j⟩ as: +⟨j′| + +|00⟩ +� +i∈[m] +√wij|i⟩ + |01⟩|g⟩ + + +∗  +|00⟩ +� +i∈[m] +√wij|i⟩ + |10⟩|g⟩ + + |j⟩ += +�� +i∈[m] wij = [A⊤x]j +β +j = j′ +0 +j ̸= j′ . +In particular the |01⟩ and |10⟩ terms disappear, and |j⟩, |j′⟩ are orthogonal unless j = j′. In the +above, we required that √wij∗√wij = wij, which is only true if wij is nonnegative. To bypass this +issue, we will implement the two copies of OA⊤x in slightly different ways, to obtain the correct +signing. For notational clarity, we let OL be the oracle which is conjugated on the left and OR +be the oracle on the right, such that U = (OL)∗(SWAP12 ⊗ I)(OR). Note that x is entrywise +nonnegative and β > 0, and hence the only factor determining the sign of wij is Aij. +When +Aij ≥ 0, we will define the oracles O′ +A used to load +� +Aij for OL and OR in a consistent way +(i.e. use the same-signed square root), so that √wij2 = wij. When Aij < 0 we will define them +in an inconsistent way, so that after the conjugation operation, −√wij√wij = wij. We have thus +shown that ⟨0|⊗(a+2)U|0⟩⊗(a+2) = M which implies the first conclusion. To see the second, all our +gates are reversible (arithmetic circuits are reversible, and OA is its own inverse), and hence the +complexity of applying O∗ +A⊤x is the same as OA⊤x. +Finally, we put together the pieces and prove Proposition 2, which we use repeatedly throughout +the paper to implement our Gibbs sampling oracles. +Proposition 2. Let x ∈ Rm +≥0 correspond to an instance of SamplerTree, and β ≥ ∥x∥1. Let p be +the Gibbs distribution associated with A⊤x, let Z := � +j∈[n] exp([A⊤x]j) and �Z ∈ [Z, CZ] for some +C ≥ 1. Finally, let q ∈ Rn have entries classically queriable in O(1) time, satisfy q ≥ p entrywise, +qj ∈ [ δ +n, 1] for all j ∈ [n], and ∥q∥1 = ρ. Suppose �Z, C, ρ, and β are explicitly known. Given +a quantum oracle for A ∈ Rm×n (defined in Section 2) with ∥A∥max ≤ 1, we can implement a +δ-approximate Gibbs oracle which has query cost O(√ρC · β log4 � Cmn +δ +� +). The total additional cost +incurred if x undergoes T Update calls which preserve the invariants on �Z, C, ρ, β is O(T log m). +Proof. Throughout the proof, let δ ← min(1 +2, δ) and B := 4(β + log(Cn +δ )). +Also define ℓ := +δ +n (following notation of Corollary 5). +We first observe that since maxj∈[n][A⊤x]j ≤ log Z ≤ +25 + +maxj∈[n][A⊤x]j + log n, +− log(Cn) ≤ max +j∈[n][A⊤x]j − log +� +�Zqj +� +≤ 0. +Here, the upper bound used that for all j ∈ [n], exp([A⊤x]j − �Zqj) = pj +qj · Z +�Z ≤ 1 by assumption. 
+Hence, for v := A⊤x − log( �Zq) entrywise, +−c ≤ max +j∈[n] vj ≤ 0 for c := log(Cn). +Next, we note log( �Zq) is entrywise bounded in magnitude by B +2 : +log( �Zqj) ≤ log(CZ) ≤ log +� +n · max +j∈[n] exp([A⊤x]j) +� ++ log C ≤ B +2 , +log( �Zqj) ≥ log Z + log δ +n ≥ min +j∈[n][A⊤x]j − log n +δ ≥ −B +2 . +Define M1 := diag +� +A⊤x +2B +� +and M2 := diag +� +− 1 +2B log( �Zq) +� +. By the calculations above, we have +∥M2∥op ≤ 1 +2, and similarly it is clear that ∥M1∥op ≤ +1 +2 because +��A⊤x +�� +∞ ≤ β. Moreover, by +using Corollary 6 with β ← B, we obtain U1, a block-encoding of M1 applicable in O(log m) time. +Using a similar construction as Corollary 6, since q, B, and �Z are all efficiently classically queriable, +we obtain U2, a block-encoding of M2 applicable in O(1) time. Hence, Proposition 5 yields U, a +block-encoding of +M1 + M2 = diag +� v +2B +� +, +which can be applied in O(log mn) time. Next, let P be the degree-∆ = O(B log Cn +δ ) polynomial +from Corollary 5, parameterized by B, v, c, q, ℓ as defined earlier. +Corollary 5 shows that P : +[−1, 1] → [− 1 +2, 1 +2]. Thus, Proposition 6 then yields U′, a block-encoding of diag +� +P( v +2B ) +� +which can +be applied in O(∆ · log mn) time. Furthermore, since q and ρ are efficiently classically queriable, +we can define a gate Oq which is applicable in O(1) time and acts as +Oq|0⟩⊗(b+1) = |0⟩ +� +j∈[n] +�qj +ρ |j⟩ + |1⟩|g⟩. +Applying U′ to the output of Oq with appropriate ancilla qubits then yields +|0⟩⊗O(1) � +j∈[n] +� +qjP(uj)2 +ρ +|j⟩|gj⟩ + |g′⟩, where uj := vj +2B for all j ∈ [n]. +Post-selecting on the first register being the all-zeroes state and measuring on the register corre- +sponding to j, we see that we obtain a sample j ∈ [n] with probability proportional to qjP(uj)2. By +Corollary 5, conditioned on the sample succeeding, the resulting distribution is δ-close in ℓ1 to the +distribution proportional to q ◦ exp(v) ∝ exp(A⊤x), and hence the result is a δ-approximate Gibbs +oracle. Finally, we bound the query cost of the oracle. Define wj := P(uj)2qj and zj := exp(vj)qj +as in Corollary 5. By definition of v, �Z, +∥z∥1 = +� +j∈[n] +exp +�� +A⊤x +� +j +� +�Z +∈ +� +C−1, 1 +� +. +26 + +Moreover, the last conclusion in Corollary 5 shows ∥w∥1 ≥ +1 +72 ∥z∥1 ≥ (72C)−1. Hence, +� +j∈[n] +qjP(uj)2 +ρ += ∥w∥1 +ρ +≥ +1 +72Cρ. +In other words, we have an oracle which we can apply in time O(∆·log mn) which correctly returns +a sample with probability α ≥ +1 +72Cρ. By applying Proposition 4 to improve the success probability, +we obtain the desired conclusion at a O(√Cρ log 1 +δ ) overhead. +Corollary 2. Following notation of Proposition 2, let R := +�Z +Z . There is a quantum oracle Otest +which can be implemented under T Update calls to x in O(T log m) time, and has query cost +O +�� +ρC · β log4 +�Cmn +ℓδ +�� +. +Furthermore, for explicitly known constants Cℓ and Cu, Otest returns “success” with probability p for +Cℓ +√Rρ ≤ p ≤ +Cu +√Rρ. +Proof. Our oracle Otest is the oracle from Proposition 2, except we will choose a sufficiently small +constant value of δ. It returns “success” when the sample is accepted by the rejection sampler after +boosting by amplitude amplification. Before boosting, the success probability from Proposition 2 +is Θ( 1 +Rρ) where the constants in the upper and lower bounds are explicit. Further, the constants +from Proposition 4 are explicit, and hence boosting by amplitude amplification improves the success +probability to Θ( +1 +√Rρ) with known constant bounds as required by the corollary statement. 
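A purely classical analogue of the hinted rejection sampler above may help fix intuition: given an entrywise overestimate q of the Gibbs distribution p, proposing from q/‖q‖₁ and accepting with probability p_j/q_j yields exact samples from p at acceptance rate 1/‖q‖₁ = 1/ρ. The quantum routine of Proposition 2 additionally loses a constant C from the polynomial approximation and then improves the 1/ρ overhead quadratically via amplitude amplification; neither is reproduced in this sketch, and the hint below is built directly from p purely for illustration (the algorithm instead constructs it from earlier samples so that it stays valid across many updates).

```python
# Classical analogue of "rejection sampling with a hint": q >= p entrywise acts as the hint,
# proposals are drawn from q/||q||_1, and acceptance probability p_j/q_j makes the accepted
# samples exactly Gibbs-distributed; the overall acceptance rate is 1/||q||_1.
import numpy as np

rng = np.random.default_rng(3)
m, n = 40, 200
A = rng.uniform(-1.0, 1.0, size=(m, n))
x = rng.uniform(0.0, 0.05, size=m)            # nonnegative dual vector, as in Problem 1

v = A.T @ x                                    # Gibbs parameter; target p ∝ exp(v)
p = np.exp(v - v.max()); p /= p.sum()

q = np.minimum(1.0, 1.5 * p + 1e-3)            # illustrative hint with q >= p entrywise
rho = q.sum()

def sample_with_hint(num):
    accepted, proposals = [], 0
    probs = q / rho
    while len(accepted) < num:
        j = rng.choice(n, p=probs)
        proposals += 1
        if rng.random() < p[j] / q[j]:         # valid acceptance rule since q >= p entrywise
            accepted.append(j)
    return np.array(accepted), proposals

samples, proposals = sample_with_hint(5000)
print(f"rho = ||q||_1 = {rho:.3f}")
print(f"empirical acceptance rate: {len(samples)/proposals:.3f} (expected ~ 1/rho = {1/rho:.3f})")

emp = np.bincount(samples, minlength=n) / len(samples)
print(f"l1 distance between empirical and target Gibbs distribution: {np.abs(emp - p).sum():.3f}")
```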
C Bounded approximation to exp on [−1, 1]

Here, we give a proof of a lemma (with slightly different constants) used in the prior work [vAG19]. This section builds entirely on prior results on polynomial approximation in [GSLW19]; we include it for completeness because a proof was not given in [vAG19]. As a reminder, we stated and used the following result earlier when constructing our rejection sampler in Appendix B.

Lemma 8 (Lemma 7, [vAG19]). Let $\beta \ge 1$, $\xi \le \frac{1}{10}$. There is a polynomial $P_{\beta,\xi}$ of degree $O(\beta\log\frac{1}{\xi})$ such that $\max_{x \in [-1,1]} |P_{\beta,\xi}(x)| \le 3$ and $\max_{x \in [-1,0]} |P_{\beta,\xi}(x) - \exp(\beta x)| \le \xi$.

To obtain the lemma, we will utilize the following result from [GSLW19].

Proposition 7 (Corollary 66, [GSLW19]). Let $x_0 \in [-1, 1]$, $r \in (0, 2]$, $\delta \in (0, r]$. Let $f : [x_0 - r - \delta, x_0 + r + \delta] \to \mathbb{C}$ be such that $f(x_0 + x) = \sum_{\ell \ge 0} a_\ell x^\ell$ for all $x \in [-r - \delta, r + \delta]$. Suppose $B > 0$ is such that $\sum_{\ell \ge 0}(r + \delta)^\ell |a_\ell| \le B$ and let $\epsilon \in (0, \frac{1}{2B}]$. There is a polynomial $P$ (see Appendix D for its numerically stable implementation) of degree $O(\frac{1}{\delta}\log\frac{B}{\epsilon})$ such that

$$\max_{x \in [x_0 - r, x_0 + r]} |f(x) - P(x)| \le \epsilon \quad \text{and} \quad \max_{x \in [-1, 1]} |P(x)| \le \epsilon + B.$$

Proof of Lemma 8. We apply Proposition 7 with $f(x) := \exp(\beta x)$, which has a convergent Taylor series everywhere, and the parameter settings $x_0 = -1$, $r = 1$, $\delta = \frac{1}{\beta}$, $B = e$. We have that $f(x_0 + x) = \sum_{\ell \ge 0} \exp(-\beta)\frac{\beta^\ell x^\ell}{\ell!} = \sum_{\ell \ge 0} a_\ell x^\ell$ with $a_\ell = \exp(-\beta)\frac{\beta^\ell}{\ell!}$ for any integer $\ell \ge 0$. We also check that our choice of $B$ is valid, via

$$\sum_{\ell \ge 0}(r + \delta)^\ell |a_\ell| = \exp(-\beta)\sum_{\ell \ge 0}\left(1 + \frac{1}{\beta}\right)^\ell \frac{\beta^\ell}{\ell!} = \exp(-\beta)\sum_{\ell \ge 0}\frac{(\beta + 1)^\ell}{\ell!} = \exp(\beta + 1 - \beta) = e.$$

Hence by Proposition 7, for any $\xi \le \frac{1}{2e}$ there is a polynomial $P$ of degree $O(\beta\log\frac{1}{\xi})$ such that $\max_{x \in [-2,0]} |\exp(\beta x) - P(x)| \le \xi$ and $\max_{x \in [-1,1]} |P(x)| \le e + \xi \le 3$.

D Numerically stable implementation of polynomial approximation

Throughout this section, let $\Delta = O(\frac{1}{\epsilon}\log^2(\frac{mn}{\epsilon}))$ be the degree of the polynomial used in the proof of Proposition 2 in Appendix B (specifically, constructed in the proof of Proposition 2, where we have $C = O(1)$ and $\delta = O(\epsilon)$ in our applications). The polynomial we use is constructed via a decomposition in the Fourier basis (see Lemmas 57 and 65, [GSLW19]). It is not immediate that this polynomial transform can be implemented stably in finite-precision arithmetic within the quantum singular value transformation framework of [GSLW19], which is used in the proof of Proposition 2. However, [Haa19] shows that given such a decomposition in the Fourier basis, we can obtain a numerically stable implementation of the required polynomial transformation as a quantum circuit, up to additive error $\xi$, in time

$$O\left(\Delta^3\log\frac{\Delta}{\xi}\right).$$

In our setting (in the proof of Proposition 2), it is straightforward to check that it suffices to take $\xi^{-1} = \mathrm{poly}(m, n, \epsilon^{-1})$. This construction results in the additive term in Theorem 4.
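As a small numerical aside on Appendix C (illustrative only, not part of the paper), the snippet below checks the coefficient bound B = e used when invoking Proposition 7, and reports the degree at which a plain truncated Taylor expansion of exp(βx) around x0 = −1 reaches accuracy ξ on [−1, 0]. Note that plain truncation does not provide the required boundedness on [−1, 1]; that extra property is exactly what Proposition 7's construction adds.

```python
# Numerical check of two ingredients in the proof of Lemma 8: the coefficient bound B = e,
# and the degree needed for a truncated Taylor expansion of exp(beta*x) around x0 = -1 to
# reach accuracy xi on [-1, 0]. Boundedness on [-1, 1] is NOT certified by truncation alone.
import numpy as np
from math import factorial, exp, log

beta, xi = 10.0, 1e-3

# Coefficient bound: exp(-beta) * sum_l ((1 + 1/beta)^l * beta^l / l!) should equal e.
B = exp(-beta) * sum((beta + 1) ** l / factorial(l) for l in range(100))
print(f"coefficient sum: {B:.6f} (e = {np.e:.6f})")

# Truncated Taylor series of exp(beta*x) around x0 = -1, evaluated on [-1, 0].
xs = np.linspace(-1.0, 0.0, 2001)
target = np.exp(beta * xs)
for d in range(1, 120):
    coeffs = [exp(-beta) * beta ** l / factorial(l) for l in range(d + 1)]
    approx = sum(c * (xs + 1.0) ** l for l, c in enumerate(coeffs))
    if np.max(np.abs(approx - target)) <= xi:
        print(f"degree {d} suffices for xi = {xi}; beta*log(1/xi) = {beta * log(1 / xi):.0f}")
        break
```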
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='03763v1 [quant-ph] 10 Jan 2023 Quantum Speedups for Zero-Sum Games via Improved Dynamic Gibbs Sampling Adam Bouland Yosheb Getachew Yujia Jin Aaron Sidford Kevin Tian∗ {abouland,yoshebg,yujiajin,sidford}@stanford.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='edu, tiankevin@microsoft.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='com Abstract We give a quantum algorithm for computing an ǫ-approximate Nash equilibrium of a zero- sum game in a m × n payoff matrix with bounded entries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Given a standard quantum oracle for accessing the payoff matrix our algorithm runs in time �O(√m + n · ǫ−2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='5 + ǫ−3) and outputs a classical representation of the ǫ-approximate Nash equilibrium.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' This improves upon the best prior quantum runtime of �O(√m + n·ǫ−3) obtained by [vAG19] and the classic �O((m+n)·ǫ−2) runtime due to [GK95] whenever ǫ = Ω((m + n)−1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We obtain this result by designing new quantum data structures for efficiently sampling from a slowly-changing Gibbs distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' ∗Work partly completed while at Stanford.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' 1 Introduction There is now a broad family of quantum algorithms for machine learning and fast numerical linear al- gebra [BWP+17], built on many quantum algorithmic primitives, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' [BHMT02, HHL09, GSLW19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' More specifically, for a wide range of problems it has been shown how quantum algorithms can (in certain parameter regimes) yield faster runtimes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='1 These quantum algorithms obtain runtimes which improve upon the dimension dependence of classical algorithms, but often at the cost of a worse dependence on the error tolerance and/or implicit access to the solution (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='g.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' query or sampling access for solution entries).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Consequently, this paper is motivated by the following question.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' To what degree is there an inherent accuracy versus dimension-dependence tradeoff for quantum optimization algorithms?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' What algorithmic techniques improve this tradeoff?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In this paper we consider this question for the fundamental optimization problem of computing ǫ-approximate Nash equilibrium in zero-sum games.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Our main result is an improved dependence on ǫ for quantum algorithms solving zero-sum games, which is very close to that of its classical counterpart.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Further, we show that for our algorithms, obtaining a classical representation of the solution is obtainable at no additional asymptotic cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Our work builds upon [vAG19, LCW19], which already took a large and important step towards answering the above question by designing quantum data structures for efficiently implementing algorithms for solving zero-sum games.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Interestingly, to obtain our result we provide improved quantum algorithms for solving a dy- namic data structure problem of sampling from a slowly-changing Gibbs distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Such dynamic sampling problems arise as a natural component of stochastic gradient methods for solving zero-sum games.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We obtain our speedups by improving a Gibbs sampling subroutine developed in [vAG19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We design a new dynamic quantum data structure which performs the necessary Gibbs sampling in time �O(ǫ− 1 2), which is faster than the corresponding �O(ǫ−1) runtime achieved by [vAG19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Beyond the intrinsic utility of solving this problem, we hope our improved Gibbs sampler showcases poten- tial algorithmic insights that can be gleaned by seeking improved error dependencies for quantum optimization algorithms.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Moreover, we hope this work encourages the study and design of quantum data structures for efficient optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='1 Zero-sum games For matrix A ∈ Rm×n its associated zero-sum game is the pair of equivalent optimization problems min u∈∆m max v∈∆n u⊤Av = max v∈∆n min u∈∆m u⊤Av, where ∆k := {x ∈ Rk ≥0 : � i∈[k] xi = 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In such a game, we refer to A as the payoff matrix and view the m and n-dimensional simplices, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' ∆m and ∆n, as the space of distributions over [m] and [n] respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' From this perspective u⊤Av, known as payoff or utility of (u, v), is the expected value of Aij when sampling i ∈ [m] and j ∈ [n] independently from the distributions corresponding to u and v.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Thus, a zero-sum game models a two-player game where a minimization player seeks to minimize the payoff while, simultaneously, a maximization player seeks to maximize it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In this paper, we consider the canonical problem of computing an approximate Nash equilibrium of a zero-sum game.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Given the payoff matrix A ∈ Rm×n we call a pair (u, v) ∈ ∆m × ∆n an ǫ- approximate Nash equilibrium (NE) for ǫ ∈ R>0 if � max v′∈∆n u⊤Av′ � − � min u′∈∆m(u′)⊤Av � ≤ ǫ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' 1Note that quantifying the end-to-end speedups obtained by these methods can be subtle due to I/O overheads, different access models [Aar15], and classical de-quantization algorithms [Tan19, CGL+20, GLG22].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' 1 We assume that the payoff matrix A and the error-tolerance are given as input to an algorithm, and that, for simplicity, ∥A∥max ≤ 1, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' the largest entry of A has magnitude at most 1 (this is without loss of generality by rescaling A ← ∥A∥−1 max A and ǫ ← ∥A∥−1 max ǫ).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' The main goal of this paper is to design improved zero-sum game solvers, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' algorithms that compute ǫ-approximate NEs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Zero-sum games are foundational to theoretical computer science, optimization, and economics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' The problem of approximately solving zero-sum games is a natural formulation of approximate linear programming (LP) and correspondingly, this problem is a prominent testbed for new optimization techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Over the past decades there have been numerous advances in the computational com- plexity of solving zero-sum games under various assumptions on problem parameter (see Section 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='3 for a survey).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Recent advancements in interior point methods (IPMs) for linear programming, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' [vdBLL+21] and references therein (discussed in more detail in Section 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='3), solve zero sum-games in time �O(mn + min(m, n)2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='2 Further the linear programming algorithm of [vdB20], shows that zero-sum games can be solved deterministically in �O((m+n)ω) time where ω < 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='373 is the current matrix multiplication constant [AW21], or �O((m + n)3) without fast matrix multiplication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In this paper, we primarily focus on sublinear-time algorithms for approximating NEs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' A well-known algorithm by [GK95] achieves a runtime of �O((m + n) · ǫ−2), which is the state- of-the-art sublinear runtime amongst classical algorithms, without further problem assumptions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Recently it has been shown that quantum algorithms can yield strikingly runtime improvements for solving zero-sum games and their generalizations [LCW19, vAG19, LWCW21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In particular, in 2019 Li, Chakrabati and Wu [LCW19] gave a quantum algorithm for zero sum games in time �O(√m + n· ǫ−4), and simultaneously van Apeldoorn and Gilyen [vAG19] gave an algorithm running in time �O(√m + n · ǫ−3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' These algorithms yield a quadratic improvement in the dimension dependence of the best classical algorithm, at the cost of a higher error dependence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' The algorithms of [LCW19, vAG19, LWCW21] operate using a standard quantum oracle for A (formally stated in Section 2), in which one can query the entries of A in superposition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We focus on the algorithm of [vAG19] for the rest of this paper, as we focus on improving error dependence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' The [vAG19] algorithm generalizes the classical algorithm of Grigoriadis and Khachiyan [GK95], and obtains a runtime improvement by speeding up a key dynamic Gibbs sampling subroutine required by the [GK95] method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' As we discuss in greater detail in Section 3, van Apeldoorn and Gilyen give a quantum data structure to efficiently perform this sampling in time quadratically faster in the dimension, which lies at the core of their algorithmic speedup.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Our result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We give a new quantum algorithm for solving zero-sum games which improves upon the runtime of the prior state-of-the-art quantum algorithm, due to [vAG19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Theorem 1 (informal, see Theorem 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Let A ∈ Rm×n with ∥A∥max ≤ 1, and ǫ ∈ (0, 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Given a quantum oracle for A (defined in Section 2), there is an �O(√m + n · ǫ−2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='5 + ǫ−3) time algorithm which yields a classical output (u, v) ∈ ∆m × ∆n that is an ǫ-approximate NE with high probability.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Our new algorithm simultaneously improves the best known quantum [vAG19] and classical [GK95] algorithms in the parameter regime where IPMs do not dominate sublinear algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In particular, it is faster than the classical �O((m+n)·ǫ−2) runtime of [GK95] whenever ǫ−1 = �O(m+n), which includes the regime where [GK95] offers advantages over the �O((m + n)ω) runtime of the [vdB20] IPM, as ω < 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' This is in contrast to the prior quantum rate of [vAG19], which does not achieve an improvement upon [GK95] in the full parameter range where sublinear algorithms 2We use the �O notation to hide polylogarithmic dependences on problem parameters when convenient for exposi- tion;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' see Section 2 for a more detailed statement of hidden parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In informal theorem statements, we use “with high probability” to indicate a polylogarithmic dependence on the failure probability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' 2 are currently preferable to IPMs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' For example, when m ≈ n and (up to logarithmic factors) ǫ ∈ [n−c, n− 1 2 ] where c = 1 2(ω − 1), the rate of [GK95] is favorable to that of [vAG19] and state-of- the-art IPMs [vdB20, CLS21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='3 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='2 Dynamic Gibbs sampling We obtain the improved error dependence in our zero-sum game solver by producing a new, faster quantum data structure to perform the Gibbs sampling as used in the algorithm of [vAG19], which may be of independent interest.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Gibbs sampling is a fundamental algorithmic primitive — the basic task is, given vector v ∈ Rn, sample from the probability distribution proportional to exp(v).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Gibbs sampling is used as a subroutine in many quantum and classical optimization algorithms, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' [BS17] and follow-up works.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In general, quantum algorithms can perform this task more efficiently using amplitude estimation, which can boost the acceptance probability of rejection sampling schemes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' This strategy was implemented in [vAG19], which approximate the maximum entry vmax of v using quantum maximum finding [DH96], uniformly sample i ∈ [n], and accept the sample with probability exp(vi −vmax) ≤ 1 using quantum rejection sampling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We give a more detailed overview of the [vAG19] Gibbs sampler and its complexity analysis in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We give a data structure which quadratically improves the error dependence of the [vAG19] Gibbs sampling subroutine runtime, from �O(√m + n· ǫ−1) per sample to an amortized �O(√m + n · ǫ− 1 2) per sample.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' A key fact which enables this improvement is that the Gibbs distributions one samples from in the zero-sum game solver of [GK95] change slowly over time: the base vector v receives bounded sparse updates in each iteration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' By storing partial information about the Gibbs distribution, namely an efficiently-computable overestimate to its entries which remains valid across many consecutive iterations, we obtain an improved dynamic Gibbs sampler, which we also provide a detailed overview of in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We now define our notion of an approximate Gibbs sampler, and then state the dynamic sampling problem we consider, which arises naturally in zero-sum game algorithms with sublinear runtimes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Definition 1 (Approximate Gibbs oracle).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' For v ∈ Rn, its associated Gibbs distribution is pv ∈ ∆n such that for all i ∈ [n], [pv]i ∝ exp(vi).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We say Ogibbs v is a δ-approximate Gibbs oracle if it samples from ˜p ∈ ∆n with ∥˜p − pv∥1 ≤ δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Problem 1 (Sampling maintenance).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Let η > 0, δ ∈ (0, 1), and suppose we have a quantum oracle for A ∈ Rm×n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Consider a sequence of T Update operations to a dynamic vector x ∈ Rm ≥0, each of the form xi ← xi + η for some i ∈ [m].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' In the sampling maintenance problem, in amortized Tupdate time per Update we must maintain a δ-approximate Gibbs oracle, Osamp, for A⊤x which is queryable in worst-case time Tsamp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Our result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We provide a quantum algorithm for solving Problem 1, which improves upon the runtime implied by the corresponding component in the algorithm of [vAG19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Theorem 2 (informal, see Theorem 3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' There is a quantum algorithm which solves Problem 1 with high probability with max(Tsamp, Tupdate) = �O �√n · Tη1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='5� and an initialization cost of �O � η3T 3� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Theorem 2 improves upon the solution to the sampling maintenance Problem 1 implied by [vAG19] by a η− 1 2 factor;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' in the setting of the [GK95] solver, where T = �O(ǫ−2) and η = Θ(ǫ), this is an ǫ− 1 2-factor improvement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' At a high level, our improvement is obtained by storing a hint consisting of a vector which overestimates the true Gibbs distribution, and an approximate 3There is evidence that ω = 2 cannot be achieved with current techniques, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' [Alm21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' 3 Table 1: Algorithms for computing ǫ-approximate Nash equilibria of zero-sum games.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Hides polylogarithmic factors and assumes A ∈ Rm×n with ∥A∥max ≤ 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Method Query model Total runtime interior point method [CLS21] classical max(m, n)ω interior point method [vdBLL+21] classical mn + min(m, n)2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='5 extragradient [Nem04, Nes07] classical mn · ǫ−1 stochastic mirror descent (SMD) [GK95] classical (m + n) · ǫ−2 variance-reduced SMD [CJST19] classical mn + � mn(m + n) · ǫ−1 [vAG19] quantum √ m + n · ǫ−3 Theorem 1 (our work) quantum √ m + n · ǫ−2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='5 + ǫ−3 Table 2: Solutions to Problem 1, T = ǫ−2, η = ǫ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Hides polylogarithmic factors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Method Query model Tsamp Tupdate explicit updates [GK95] classical 1 m + n max-based rejection sampling [vAG19] quantum √ m + n · ǫ−1 √ m + n · ǫ−1 Theorem 2 (our work) quantum √ m + n · ǫ− 1 2 √ m + n · ǫ− 1 2 normalization factor, which are infrequently updated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Our maintained hint satisfies the desirable properties that: (i) it remains valid for a batch of consecutive iterations, and (ii) the degree of overestimation is bounded.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' The former property ensures a fast amortized update time, and the latter ensures a fast sample time by lower bounding the acceptance probability of our quantum rejection sampler.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' Our high-level strategy for maintaining improved hints is to repeatedly call our sampling access to accurately estimate large entries of the Gibbs distribution, and to exploit stability of the distribution under the setting of Problem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' We discuss our dynamic Gibbs sampler in more detail and compare it with previous methods for solving Problem 1 in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' The initialization cost of Theorem 2 is due to the current state-of-the-art in numerically stable implementations of the quantum singular value transformation (SVT) framework of [GSLW19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8tE2T4oBgHgl3EQfPwar/content/2301.03763v1.pdf'} +page_content=' This cost is also the cause of the additive �O(ǫ−3) term in Theorem 1.' 
We discuss this cost in Appendix D; improvements to numerically stable implementations of [GSLW19] would be reflected in the runtimes of Theorems 1 and 2.

1.3 Related work

Quantum optimization and machine learning. There is a wide array of quantum algorithms for optimization and machine learning which make use of fundamental algorithmic primitives such as amplitude amplification [BHMT02], the HHL algorithm [HHL09], and the quantum singular value transformation [GSLW19]. For example, a number of works gave HHL-based algorithms for a variety of machine learning tasks such as PCA [LMR14], SVMs [RML14], and recommendation systems [KP16]. For more details see the survey article of [BWP+17]. Most relevant to our current work are quantum algorithms for optimization problems. For example, Brandao and Svore [BS17] gave a quantum algorithm for SDP solving based on the Arora-Kale algorithm [AK07], which was later improved by [VAGGdW20b]. There have also been quantum IPM-based methods for LPs and SDPs [KP20]. Additionally, a series of works have considered quantum algorithms for general convex optimization [CCLW20, vAGGdW20a], which make use of Jordan's algorithm for fast gradient estimation [Jor05, GAW19]. In the area of zero-sum games, in addition to the works previously mentioned [vAG19, LCW19] on ℓ_1-ℓ_1 games (where both players are ℓ_1-constrained), there have been several works considering different variants of zero-sum games. For example, Li, Chakrabarti and Wu [LCW19] gave quantum algorithms for ℓ_2-ℓ_1 games with a quadratic improvement on the dimension.
Later, Li, Wang, Chakrabarti and Wu [LWCW21] extended this algorithm to more general ℓ_q-ℓ_1 games with q ∈ (1, 2].

Zero-sum games. Zero-sum games are a canonical modeling tool in optimization, economics and machine learning [Neu28]. The classic extragradient (mirror prox) method [Nem04, Nes07] computes an ε-approximate NE in Õ(mn · ε^{-1}) time; as discussed previously, the stochastic mirror descent method of [GK95] obtains the same accuracy in time Õ((m + n) · ε^{-2}). An intermediate runtime was recently obtained by [CJST19] using variance reduction, described in Table 1. Improved runtimes are available under more fine-grained characterizations of the matrix A, such as sparsity (e.g. number of nonzero entries per row or column) or numerical sparsity (e.g. rows and columns with bounded ℓ_1-to-ℓ_2 norm ratios) [CJST20]. Notably, the [GK95] algorithm also offers runtime improvements under a sparsity assumption, as does the algorithm of [vAG19] in certain sparsity-to-accuracy ratio regimes. In this paper, we focus on NE algorithms in the general setting (without further sparsity or numerical sparsity assumptions). In parallel, a long line of research improving IPMs for solving linear programming [Kar84, Ren88, LS14, LS19, vdBLSS20, JSWZ21] has led to a number of different zero-sum game solvers with polylogarithmic runtime dependencies on the problem accuracy ε. The current state-of-the-art variants of IPMs are [CLS21] and [vdBLL+21], which achieve runtimes of Õ(max(m, n)^ω) and Õ(mn + min(m, n)^{2.5}),
respectively. We refer readers to Table 1 for detailed comparisons. Finally, for strongly polynomial runtimes (i.e. with no dependence on ε), which are outside the scope of this paper, we refer readers to [DNV20] and references therein.

1.4 Future work

Theorem 1's ε dependence is within an ε^{-1/2} factor of matching classical counterparts. To the best of our knowledge, removing this ε^{-1/2} overhead would represent the first quantum algorithm for a natural optimization problem which improves upon classical counterparts across all parameters. Both our work and [vAG19] solve Problem 1 by leveraging a powerful polynomial approximation-based technique developed in [GSLW19], known as the quantum singular value transform (QSVT). In both cases, QSVT is used with a polynomial of degree Õ(ε^{-1}). We note that in closely-related classical settings (discussed in [SV14]), Chebyshev polynomial-based approximations yield a quadratically smaller degree. However, a boundedness requirement (due to the spectra of quantum gates) prevents straightforwardly applying these constructions within QSVT. Sidestepping this barrier is a natural avenue towards improving our work, which we leave as an open problem. More generally, establishing optimal oracle query complexities of dynamic Gibbs sampling (e.g.
Problem 1) and solving zero-sum games are key problems left open by our work. These questions are potentially more approachable than establishing tight time complexity characterizations. For example, could max(T_samp, T_update) be improved to Õ(√n) in the context of Theorem 1, or can we rule out such an improvement in the query model?

1.5 Organization

In Section 2 we state the notation used throughout the paper, as well as the (classical and quantum) computational models we assume. In Section 3, we give a brief technical overview of the core components of our algorithm used to prove Theorem 1: the stochastic gradient method our method is built on, and an efficient quantum implementation of a key subroutine using a new dynamic Gibbs sampler. Finally, in Section 4 we give our new quantum sampler, and prove Theorem 2. We aim to give a self-contained, but simplified, description of our algorithm in Section 3 to improve the readability of the paper for readers with an optimization background unfamiliar with quantum computing, and vice versa. In particular, we abstract away the core optimization machinery (stochastic mirror descent) and quantum machinery (quantum SVT) developed in prior work into the statements of Propositions 1 and 2, and focus on how we use these statements black-box to build a faster algorithm. The proofs of these statements can be found in Appendices A and B.

2 Preliminaries

General notation. Õ hides logarithmic factors in problem dimensions (denoted m and n), target accuracies (denoted ε), and failure probabilities (denoted α). When discussing runtimes for Problem 1, we additionally use Õ to hide logarithmic factors in the parameters η, T. We let e_i ∈ R^n denote the i-th standard basis vector for i ∈ [n] when n is clear from context.
∥·∥_p denotes the ℓ_p norm of a vector. For A ∈ R^{m×n}, its i-th row and j-th column are respectively A_{i:} and A_{:j}. For v ∈ R^n, diag(v) is the diagonal n × n matrix with v as the diagonal. Conjugate transposes of A are denoted A^*; when the matrix is real we use A^⊤. The all-ones and all-zeros vectors of dimension n are 1_n and 0_n. Finally, throughout a := ⌈log₂ m⌉ and b := ⌈log₂ n⌉, so [m] ⊆ [2^a] and [n] ⊆ [2^b].

Computation models. We assume entries of A are w-bit reals for w = O(log(mn)), and work in the word RAM model where w-bit arithmetic operations take O(1) time; for simplicity, we assume mathematical operations such as trigonometric functions and radicals can also be implemented exactly for w-bit words in O(1) time. Throughout, "quantum states" mean unit vectors, and "quantum gates" or "oracles" O mean unitary matrices. We follow standard notation and identify a standard basis vector e_i for i ∈ [n] with |i⟩, an a-qubit state, in which i is represented in binary (i.e. more formally, |i⟩ = |bin(i)⟩, and bin is omitted for brevity). We consider the standard model of quantum access to oracles, in which the oracle O, which is defined by its operation on |s⟩ for all {0, 1}*-valued s (where length is clear from context), can be queried in superposition. If O is queried on |v⟩ := Σ_s α_s|s⟩, the result is O|v⟩ = Σ_s α_s(O|s⟩).
We use |g⟩, |g′⟩, etc. (when clear from context) to denote arbitrary sub-unit vectors, which represent garbage states (unused in computations). The tensor product of states |u⟩ and |v⟩ on a and b qubits is denoted |u⟩|v⟩, an (a + b)-qubit state. The runtime of a quantum circuit is its maximum depth (in arithmetic gates on w-bit words).

Access model. Throughout the paper, we assume a standard quantum oracle for accessing A (recall ∥A∥_max ≤ 1). In particular, by a quantum oracle for A we mean an oracle O_A which, when queried with |i⟩|j⟩|s⟩ for i ∈ [m], j ∈ [n], s ∈ {0, 1}^w, reversibly writes A_ij (in binary) to the third register in O(1) time, i.e. O_A|i⟩|j⟩|s⟩ = |i⟩|j⟩|s ⊕ A_ij⟩, where ⊕ is bitwise mod-2 addition. Given a quantum oracle for A, with two queries, by standard constructions one can construct an oracle which places the value in the amplitude of the state rather than the register itself. More formally, one can construct⁴ an O′_A which operates as: O′_A|0⟩|i⟩|j⟩ = √(A_ij)|0⟩|i⟩|j⟩ + √(1 − |A_ij|)|1⟩|g⟩, for (i, j) ∈ [m] × [n]. It is standard in the literature to (using ancilla qubits to store the output register where A_ij is written) construct such an O′_A from O_A under our classical model of computation, see e.g. [GR02]. For simplicity, we omit discussion of ancilla qubits in the remainder of the paper and assume direct access to O′_A. We also note that there is ambiguity in the implementation of O′_A in that the square root is not unique, and that we have control over the signing used in this implementation. We will use this flexibility crucially later in the paper, specifically Corollary 6.
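To make the amplitude-loading step concrete, the following small numpy sketch (our own illustration, not the paper's circuit; the entry value 0.3 is arbitrary) checks the single-qubit identity it relies on for a nonnegative entry A_ij: rotating an ancilla from |0⟩ by θ = arccos(√A_ij) leaves amplitude √A_ij on the |0⟩ branch and √(1 − A_ij) on the |1⟩ branch. Signed entries additionally require a choice of square-root signs, which is exactly the flexibility noted above.

    import numpy as np

    # Check of the ancilla-rotation identity underlying O'_A for a nonnegative entry
    # (a sketch under our own naming; the real oracle also carries the |i>|j> registers).
    A_ij = 0.3
    theta = np.arccos(np.sqrt(A_ij))
    ancilla = np.array([np.cos(theta), np.sin(theta)])  # rotation applied to |0>

    assert np.isclose(ancilla[0], np.sqrt(A_ij))        # amplitude sqrt(A_ij) on |0>
    assert np.isclose(ancilla[1], np.sqrt(1 - A_ij))    # amplitude sqrt(1 - |A_ij|) on |1>
    assert np.isclose((ancilla ** 2).sum(), 1.0)        # still a unit vector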
3 Overview of approach

In this section, we give an overview of the approach we take to prove our main results: an improved quantum runtime for solving zero-sum games (Theorem 4) and an improved quantum data structure for dynamic Gibbs sampling (Theorem 3). We organize this section as follows.

In Section 3.1, we state Algorithm 1, the optimization method framework we use to solve zero-sum games. This framework is a generalization of the classical algorithm of [GK95]. We state its guarantees in Proposition 1 and defer the proof to Appendix A. Algorithm 1 assumes access to an approximate Gibbs oracle (Definition 1) for sampling from dynamic distributions as stated in Problem 1. The bulk of our work is devoted to obtaining an efficient quantum implementation of such an oracle (Theorem 3), and using this result we prove Theorem 4 at the end of Section 3.1.

In Section 3.2, we overview the main technical innovation of this paper, an improved solution to Problem 1. Whereas prior work by [vAG19] solves Problem 1 at an amortized ≈ √(m + n) · ε^{-1} cost per iteration, we show how to solve the problem at an amortized ≈ √(m + n) · ε^{-1/2} cost.
We remark that the only quantum components of our algorithm (quantum SVT and amplitude amplification) are abstracted away by Proposition 2, which is proven in Appendix B.

3.1 Solving matrix games with a Gibbs sampling oracle

Our proof of Theorem 4 uses an efficient implementation of the algorithmic framework stated in Algorithm 1, based on stochastic mirror descent. In specifying Algorithm 1, we recall our earlier Definition 1, which captures the approximate sampling access we require for Algorithm 1's execution.

Algorithm 1: MatrixGameSolver(δ, η, T)
1  Input: A ∈ R^{m×n}, desired accuracy ε ∈ (0, 1), δ-approximate Gibbs oracles for the (dynamic) vectors −A^⊤x_t and Ay_t
2  Parameters: Gibbs sampler parameter δ ∈ (0, 1), step size η > 0, iteration count T
3  Initialize û ← 0_m, v̂ ← 0_n, x_0 ← 0_m, and y_0 ← 0_n
4  for t = 0 to T − 1 do
5      Independently sample j_t, j′_t ∈ [n] using O^{gibbs}_{−A^⊤x_t} and i_t, i′_t ∈ [m] using O^{gibbs}_{Ay_t}
6      Update y_{t+1} ← y_t + ηe_{j_t} and x_{t+1} ← x_t + ηe_{i_t}    // Update iterates.
7      Update û ← û + (1/T)e_{i′_t} and v̂ ← v̂ + (1/T)e_{j′_t}        // Update output.
8  return (û, v̂)

Footnote 4: This follows e.g. by calling the oracle to obtain the value of A_ij in binary (interpreted as a signed number between 0 and 1), adding an ancilla qubit, performing arithmetic to compute the rotation angle needed on that ancilla, applying a tower of controlled rotation gates to an ancilla qubit using that rotation angle expressed in binary, then calling the standard oracle a second time to uncompute the binary value of A_ij. See e.g. [GR02] for details.

The main skeleton of Algorithm 1 (Lines 5-6) using exact oracles is identical to the method of [GK95].
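As a concrete reference point, here is a minimal classical sketch of that exact-oracle skeleton (our own illustrative code, with schematic parameter choices following Proposition 1, constants omitted). The paper's speedup comes from replacing the two exact samplers below with the quantum δ-approximate Gibbs oracles of Theorem 3, rather than recomputing −A^⊤x_t and Ay_t each iteration.

    import numpy as np

    def gibbs_sample(rng, logits):
        """Draw one index from the Gibbs distribution proportional to exp(logits)."""
        p = np.exp(logits - logits.max())
        p /= p.sum()
        return rng.choice(len(p), p=p)

    def matrix_game_solver(A, eps, rng=None):
        """Classical sketch of Algorithm 1 with exact Gibbs oracles (the [GK95] skeleton)."""
        if rng is None:
            rng = np.random.default_rng(0)
        m, n = A.shape
        eta = eps / 60
        T = int(np.ceil(eps ** -2 * np.log(m * n)))   # Theta(eps^-2 log(mn/alpha)), constants omitted
        x, y = np.zeros(m), np.zeros(n)
        u_hat, v_hat = np.zeros(m), np.zeros(n)
        for _ in range(T):
            logits_y, logits_x = -A.T @ x, A @ y      # naive O(mn) recomputation per iteration
            j, j_out = gibbs_sample(rng, logits_y), gibbs_sample(rng, logits_y)   # Line 5
            i, i_out = gibbs_sample(rng, logits_x), gibbs_sample(rng, logits_x)
            y[j] += eta                               # Line 6: update iterates
            x[i] += eta
            u_hat[i_out] += 1.0 / T                   # Line 7: accumulate sparse averaged output
            v_hat[j_out] += 1.0 / T
        return u_hat, v_hat                           # Line 8: approximate NE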
However, our framework builds upon [GK95] in the following three ways.

1. We tolerate total variation error in the sampling procedure via δ-approximate Gibbs oracles.
2. We provide a high-probability guarantee on the duality gap using martingale arguments.
3. We subsample the output to obtain a sparse solution yielding a comparable duality gap.

We remark that several of these improvements have appeared previously, either explicitly or implicitly, in the stochastic gradient method literature. For example, an approximation-tolerant stochastic gradient method was given in [CJST20], and our proofs of the high-probability guarantees are based on arguments in [AL17, CDST19]. For completeness we give a self-contained proof of the following guarantee on Algorithm 1 in Appendix A.

Proposition 1. Let A ∈ R^{m×n} satisfy ∥A∥_max ≤ 1 and ε, α ∈ (0, 1). Let δ ≤ ε/20, η = ε/60, and T = Θ(ε^{-2} log(mn/α)) for an appropriate constant. With probability ≥ 1 − α, Algorithm 1 outputs an ε-approximate NE for A.

Given Proposition 1, to obtain our faster zero-sum game solvers we simply need to efficiently implement the Gibbs sampling in Line 5. As introduced in Section 1, Problem 1 describes a dynamic approximate Gibbs oracle sampling problem sufficient for this task.
Indeed, solving two appropriate parameterizations of Problem 1 provides the oracles needed by Algorithm 1. By combining Proposition 1 with the following Theorem 3 (our solution to Problem 1, discussed in greater detail in Section 3.2), we prove our main result, Theorem 4.

Theorem 3. Let α ∈ (0, 1) and δ ≤ η. Given a quantum oracle for A ∈ R^{m×n} (defined in Section 2) with ∥A∥_max ≤ 1, we can solve Problem 1 with probability ≥ 1 − α with

    max(T_samp, T_update) = O( 1 + √n · Tη · log⁴(mn/δ) · ( √(η log(nηT/α)) + η log(nηT/α) ) ),

and an additive initialization cost of

    O( η³T³ log⁴(nηT/δ) + log⁷(nηT/δ) ).

Theorem 4. Let A ∈ R^{m×n} satisfy ∥A∥_max ≤ 1, and let ε, α ∈ (0, 1). Given a quantum oracle for A (defined in Section 2), there is a quantum algorithm which yields a classical output (u, v) ∈ ∆^m × ∆^n that is an ε-approximate NE for A with probability ≥ 1 − α in time

    O( (√(m + n)/ε^{2.5}) · log⁴(mn/ε) · log^{2.5}(mn/(αε)) + (√(m + n)/ε²) · log⁴(mn/ε) · log³(mn/(αε)) + (1/ε³) · log⁷(mn/ε) ).

Proof. We apply two instances of Theorem 3 to implement the δ-approximate Gibbs oracles for the dynamic vectors −A^⊤x_t and Ay_t, to implement each iteration of Algorithm 1 in amortized O(1 + T_samp + T_update) time. Using the settings of the parameters T, η in Proposition 1 and setting δ = Θ(ε), which suffices for Algorithm 1 and Theorem 3, we have

    max(T_samp, T_update) = O( (√(m + n)/ε) · log⁴(mn/ε) · log(mn/(αε)) · ( √(ε log(mn/(αε))) + ε log(mn/(αε)) ) ).

The conclusion follows since, by observation, Algorithm 1 costs O(T · (1 + T_samp + T_update)).
As remarked in the introduction, the additive term in the runtime comes from the cost of stably implementing a quantum circuit required in the use of Theorem 3 representing a polynomial transformation in finite precision, which we discuss in greater detail in Appendix D.

3.2 Dynamic sampling maintenance via dynamic hint maintenance

In this section, we overview our proof of Theorem 3, which proceeds in two steps.

1. We reduce sampling maintenance (Problem 1) to a problem which we call hint maintenance. This latter problem is a specialization of the sampling maintenance problem where suitable advice, which we call the hint throughout, is provided.
2. We show how to solve the hint maintenance problem required by Proposition 2 in Theorem 3, by recursively calling Proposition 2 in phases, allowing us to maintain hints of suitable quality.

Reducing sampling maintenance to hint maintenance. First, we introduce the following data structure for maintaining the x variable in Problem 1, which was used crucially in [vAG19] for dynamic Gibbs sampling. This data structure allows efficient queries to subsets of the coordinates of x, and we use it in our Gibbs sampler as well.

Lemma 1 (Sampler tree). Let η ∈ R_{≥0} and m ∈ N. There is a classical data structure, SamplerTree, supporting a tree on O(m) nodes such that [m] corresponds to leaves, with the following operations.
- Init(m, η_fixed): initialize x ← 0_m and η ← η_fixed.
- Update(i): x_i ← x_i + η.
- SubtreeSum(v): return the sum of all x_i, where i is in the subtree of v.

The total runtime of T calls to Update is O(T log m), and calls to SubtreeSum cost O(1).

An implementation of SamplerTree based on propagating subtree sums upon updates is a standard classical data structure, and we omit further description for brevity.
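A minimal classical sketch of such a sampler tree (our own implementation of the Lemma 1 interface; the node indexing is ours) is below: a complete binary tree whose nodes cache subtree sums, so Update walks one leaf-to-root path in O(log m) and SubtreeSum is a single lookup.

    class SamplerTree:
        """Sketch of Lemma 1: x lives at the leaves, every node caches its subtree sum."""

        def __init__(self, m, eta_fixed):           # Init(m, eta_fixed): x <- 0_m, eta <- eta_fixed
            self.eta = eta_fixed
            self.num_leaves = 1
            while self.num_leaves < m:              # round up to a power of two for a complete tree
                self.num_leaves *= 2
            self.sums = [0.0] * (2 * self.num_leaves)   # node v's subtree sum sits at index v; root is 1

        def update(self, i):                        # Update(i): x_i <- x_i + eta
            v = self.num_leaves + i                 # leaf storing coordinate i
            while v >= 1:                           # O(log m) path back to the root
                self.sums[v] += self.eta
                v //= 2

        def subtree_sum(self, v):                   # SubtreeSum(v): O(1) lookup
            return self.sums[v]

    # Example: after two updates, the root (node 1) holds ||x||_1 = 2 * eta.
    t = SamplerTree(m=6, eta_fixed=0.5)
    t.update(0); t.update(3)
    assert t.subtree_sum(1) == 1.0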
Next, we state our first building block towards solving Problem 1, a result which can be thought of as quantum sampling with a hint. We defer its proof to Appendix B, as it is primarily based on generalizing dynamic block-encoding strategies with bounded-degree polynomial approximations, as pioneered by [GSLW19, vAG19].

Proposition 2. Let x ∈ R^m_{≥0} correspond to an instance of SamplerTree, and β ≥ ∥x∥_1. Let p be the Gibbs distribution associated with A^⊤x, let Z := Σ_{j∈[n]} exp([A^⊤x]_j), and let Ẑ ∈ [Z, CZ] for some C ≥ 1. Finally, let q ∈ R^n have entries classically queriable in O(1) time, satisfy q ≥ p entrywise, q_j ∈ [δ/n, 1] for all j ∈ [n], and ∥q∥_1 = ρ. Suppose Ẑ, C, ρ, and β are explicitly known. Given a quantum oracle for A ∈ R^{m×n} (defined in Section 2) with ∥A∥_max ≤ 1, we can implement a δ-approximate Gibbs oracle which has query cost O(√(ρC) · β log⁴(Cmn/δ)). The total additional cost incurred if x undergoes T Update calls which preserve the invariants on Ẑ, C, ρ, β is O(T log m).

Proposition 2 makes use of an overestimating hint vector q and an approximate normalization constant Ẑ, which we collectively call the hint. The acceptance probability of our rejection sampling is governed by two primary parameters: ρ = ∥q∥_1, which reflects the degree of overestimation (and can be thought of as a hint quality), and C ≥ 1, which reflects our inability to accept with probability p_j/q_j when p is implicit (which can be thought of as a normalization quality). In particular, the rejection sampling scheme used in Proposition 2 will instead accept with probability p_j/(Cq_j).⁵

Footnote 5: Exactly computing Z may require time Ω(n) in standard implementations, an obstacle to runtimes ∝ √n.

Here we elaborate briefly on the implementation of Proposition 2 (for more details, see Appendix 4). We follow the notation of Proposition 2, and also let w := A^⊤x, such that the unnormalized Gibbs distribution is exp(w) and p = exp(w)/Z. Proposition 2 is a rejection sampler which first loads the hint q into superposition, and then applies a filter. Overall, our scheme has the form

    sample j ∼ q/ρ, then accept with probability exp(w_j)/(CZ · q_j) = p_j/(Cq_j),    (1)

which results in an accepted sample with probability ≈ 1/(ρC), and hence requires ≈ √(ρC) trials to succeed after applying quantum amplitude amplification, a generalization of Grover search [BHMT02].⁶ The latter filtering step is implemented using appropriate block-encoding technology.

Footnote 6: The β in Proposition 2 comes from loading exp(w_j) into a quantum oracle via polynomials of degree ≈ β.

The above discussion suggests that the hint and normalization qualities, parameterized by ρ and C, are crucial in controlling the acceptance probability of our scheme. More concretely, in our applications of Proposition 2, β = ηT = Õ(1/ε), which is the bound on the ℓ_1 norm of the x_t and y_t iterates in Algorithm 1 under the parameter settings of Proposition 1. Overall, the cost of implementing an approximate Gibbs oracle is then (up to logarithmic factors) √(ρC) · (1/ε). Proposition 2 hence reduces Problem 1 to the problem of maintaining the hint, consisting of a vector q and a normalization estimate Ẑ.
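A classical analogue of scheme (1) looks as follows (our sketch; the quantum version prepares the proposal in superposition and accelerates the rejection loop with amplitude amplification, giving the √(ρC) dependence). Here the known estimate Ẑ stands in for the unknown Z.

    import numpy as np

    def hinted_rejection_sample(rng, w, q, Z_hat, C):
        """Sample j with probability proportional to exp(w_j), given a hint q >= exp(w)/Z
        entrywise and an estimate Z <= Z_hat <= C*Z.  Every acceptance probability is in
        [0, 1], and a sample is accepted after about rho*C trials in expectation, rho = ||q||_1."""
        rho = q.sum()
        trials = 0
        while True:
            trials += 1
            j = rng.choice(len(q), p=q / rho)                      # propose j ~ q / rho
            if rng.random() < np.exp(w[j]) / (C * Z_hat * q[j]):   # accept w.p. ~ p_j / (C q_j)
                return j, trials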
We mention that Proposition 2 is a strict generalization of a corresponding building block in [vAG19], which only used q set to the all-ones vector.

Approaches for Problem 1. We now overview our improved solution to Problem 1 via efficient use of Proposition 2. To motivate our solution, we outline three solutions to Problem 1 offering different tradeoffs in the overall quality ρC. The first only uses classical information and does not use Proposition 2 at all, the second uses Proposition 2 but maintains no history across iterates, and the third (building upon the first two) is our approach.

Solution 1: [GK95]. A standard way to solve Problem 1 is to explicitly update w = A^⊤x and exp(w), and exactly maintain the normalizing constant Z. This allows us to sample from p in Õ(1) time. Since w changes by one row of A under a 1-sparse Update operation to x, this is implementable in O(n) time per iteration. We can view this as an instance of the scheme (1) with q = p, C = 1, and ρ = 1. It yields the (unbalanced) tradeoff for Problem 1 of T_samp = Õ(1) and T_update = O(n).
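A sketch of this explicit bookkeeping (our own code; with an additional tree over exp(w) the sampling step becomes Õ(1), which we omit here) is:

    import numpy as np

    class ExplicitGibbsOracle:
        """Solution 1 sketch: maintain w = A^T x, exp(w) and Z explicitly under 1-sparse updates."""

        def __init__(self, A, eta):
            self.A, self.eta = A, eta
            self.w = np.zeros(A.shape[1])        # x = 0 initially, so w = A^T x = 0
            self.expw = np.exp(self.w)
            self.Z = self.expw.sum()

        def update(self, i):
            """x_i <- x_i + eta, i.e. w <- w + eta * (row i of A); O(n) work."""
            self.w += self.eta * self.A[i]
            self.expw = np.exp(self.w)
            self.Z = self.expw.sum()

        def sample(self, rng):
            """Draw j ~ p = exp(w)/Z exactly (O(n) here; O~(1) with a sampler tree over exp(w))."""
            return rng.choice(len(self.w), p=self.expw / self.Z)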
Solution 2: [vAG19]. A recent work [vAG19] introduced a quantum implementation of the scheme (1) with an improved tradeoff. The [vAG19] scheme first uniformly samples, which in the language of (1) means q = 1_n and ρ = n. It then applies quantum maximum finding [DH96] to obtain an approximate maximum entry of w, which they show takes time Õ(β · √n); for the sake of simplicity here, we assume this exactly yields w_max := max_{j∈[n]} w_j. Finally, the acceptance probability p_j/(Cq_j) is set to exp(w_j − w_max). For q = 1_n, this translates to p_j · exp(w_max − w_j) = exp(w_max)/Z ≤ 1, implying C = 1 suffices. We note this bound on C can be tight when w is very non-uniform. Overall, the [vAG19] scheme's update time requires maximum finding, and its sampling time (via Proposition 2) requires time Õ(β · √(ρC)) = Õ(β · √n). For β = Õ(1/ε) as in Algorithm 1, this yields the balanced tradeoff max(T_samp, T_update) = Õ(√n · ε^{-1}). As discussed earlier, our key insight is to improve upon this specific choice of hint in [vAG19], for their implicit use of Proposition 2.

Solution 3: this work. We design better hints for Proposition 2 by executing our algorithm in phases corresponding to batches of ≈ 1/η iterations. At the start of each phase, we use the Gibbs access afforded by Proposition 2 to produce a suitable hint for efficiently implementing the next phase. Our execution of this strategy, parameterized by an integer k ∈ [n], relies on the following observations.
1. During the ⌈1/η⌉ iterations t ∈ {τ + s}_{s∈[⌈1/η⌉]} (where τ starts the phase), the dynamic Gibbs distribution p_t (where t is the iteration index) changes by O(1) multiplicatively, since w changes entrywise by O(1) additively. Thus, the quality of a hint vector deteriorates by at most a constant in the phase, so it suffices to give a good hint q_τ ≥ p_τ at the phase start.

2. By using access to Proposition 2 at the end of the previous phase, we can efficiently estimate large entries of p_τ. More precisely, we sample Õ(k) times from p_τ, and let the empirical distribution of these samples be q̃. Chernoff bounds show that any large entry [p_τ]_j = Ω(1/k) will be accurately reflected in the empirical sample. Hence, we set the hint to

       q_j = q̃_j · O(1)      if q̃_j = Ω(1/k),
       q_j = (1/k) · O(1)    if q̃_j = O(1/k),

   for appropriate constants. This yields an improved hint quality of ρ ≈ n/k, since large entries of the hint sum to at most O(1) (as q̃_j ≈ p_j), and small entries sum to O(n/k).

3. We show that a similar strategy of using empirical concentration, combined with a testing variant of Proposition 2, accurately estimates the normalizing factor Z, yielding C = O(1).

This strategy yields T_samp = Õ(β · √(n/k)) and T_update = Õ(T_samp · kη) (since we amortize T_update over ≈ 1/η iterations). For the parameter settings of Algorithm 1, optimizing k yields max(T_samp, T_update) = Õ(√n · ε^{-1/2}).
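A classical sketch of the hint construction in observation 2 (our own code; the constant 18 and the 1/(2k) threshold follow the explicit choices made in Lemma 3 below, and `samples` stands for Õ(k) draws from the phase-start oracle) is:

    import numpy as np

    def build_hint(samples, n, k):
        """Turn O~(k) draws from p_tau into an entrywise overestimate q of the phase's
        Gibbs distributions: scale up large empirical entries, floor the rest at ~1/k."""
        q_tilde = np.bincount(samples, minlength=n) / len(samples)   # empirical distribution
        large = q_tilde >= 1.0 / (2 * k)                             # the set B from Lemma 3
        q = np.where(large, 18.0 * q_tilde, 18.0 / k)
        rho = q.sum()      # hint quality rho = ||q||_1 = O(n/k): O(1) from B, O(n/k) elsewhere
        return q, rho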
We prove Theorem 3, our improved solution to Problem 1, in Section 4. Ignoring logarithmic factors and assuming η ≪ 1 (as in our setting), Theorem 3 shows we can maintain max(T_samp, T_update) = Õ(√n · Tη^{1.5}). For the parameter settings T = Õ(ε^{-2}), η = Θ(ε), as stated in Proposition 1, this indeed equates to max(T_samp, T_update) = Õ(√n · ε^{-1/2}).

4 Gibbs sampling oracle implementation

In this section, we prove Theorem 3, which gives our solution to Problem 1. To do so, we follow the outline given in Section 3.2, wherein we solve Problem 1 in batches of ⌈1/η⌉ iterations, each of which we call a "phase." In Sections 4.1 and 4.2, we only discuss a single phase of Problem 1, consisting of the iterations τ + s for s ∈ [⌈1/η⌉] and some initial iteration τ, assuming certain invariants (stated below) hold at the start of the phase. We give a complete solution to Problem 1 in Section 4.3.

Invariant 1 (Approximate normalization access). We explicitly have Ẑ_prev with Ẑ_prev ∈ [Z_τ, CZ_τ] for some C = O(1).

Invariant 2 (Initial sampling maintenance). We have O_τ solving Problem 1 in iteration τ.

The remainder of this section is then organized as follows.

Section 4.1: We show that, assuming Invariants 1 and 2 hold at the start of a phase, we can perform preprocessing used to construct our hint, consisting of the estimated normalization Ẑ and vector q, in an application of Proposition 2. This gives the cost of T_samp in Problem 1.
Section 4.2: We show that at the conclusion of each phase we can maintain Invariants 1 and 2 for use in the next phase. This gives the cost of $T_{\mathrm{update}}$ in Problem 1.

Section 4.3: We recursively call the subroutine of Sections 4.1 and 4.2 (which solves Problem 1 for all the iterations $\tau + s$ where $s \in [\lceil 1/\eta \rceil]$ for some $\tau$) $\approx \eta T$ times to prove Theorem 3.

4.1 Preprocessing and approximate Gibbs oracle implementation

In this section, we show how to construct the "hint" $q$ which will be used throughout a phase (starting in iteration $\tau$) given access to $\mathcal{O}_\tau$, and bound $\rho = \|q\|_1$, which quantifies the quality of our hint, under the assumption that Invariants 1 and 2 hold in the phase. We first show a multiplicative stability property of the relevant Gibbs distributions in a phase.

Lemma 2. For all $s \in [\lceil 1/\eta \rceil]$, we have $Z_{\tau+s} \in [\frac{1}{3}Z_\tau, 3Z_\tau]$, and $p_{\tau+s} \in [\frac{1}{9}p_\tau, 9p_\tau]$ entrywise.

Proof. Let $\nu_t := \exp(A^\top x_t)$ for all $t$, such that $p_t = \frac{\nu_t}{Z_t}$. We have that for any $j \in [n]$,
$$\frac{[\nu_{\tau+s}]_j}{[\nu_\tau]_j} = \exp\left(\left(A^\top(x_{\tau+s} - x_\tau)\right)_j\right) \in \left[\exp\left(-\|A\|_{\max}\|x_{\tau+s} - x_\tau\|_1\right), \exp\left(\|A\|_{\max}\|x_{\tau+s} - x_\tau\|_1\right)\right] \subseteq \left[\exp(-\eta s), \exp(\eta s)\right] \subseteq \left[\tfrac{1}{3}, 3\right].$$
Similarly, $Z_{\tau+s} \in [\frac{1}{3}Z_\tau, 3Z_\tau]$, and combining yields the conclusion.
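As a quick numerical sanity check on Lemma 2 (a sketch under assumed, illustrative dimensions and step sizes, not the paper's setting), one can perturb $x$ by steps of $\ell_1$-norm at most $\eta$ for one phase and compare the resulting Gibbs distributions entrywise:

```python
import numpy as np

rng = np.random.default_rng(0)
m, n, eta = 5, 200, 0.05                      # illustrative sizes (assumptions)
A = rng.uniform(-1, 1, (m, n))                # ||A||_max <= 1
x = rng.uniform(0, 1, m)

def gibbs(x):
    nu = np.exp(A.T @ x)
    return nu / nu.sum(), nu.sum()            # (p, Z)

p_tau, Z_tau = gibbs(x)
for _ in range(int(np.ceil(1 / eta))):        # one phase of ceil(1/eta) iterations
    step = rng.uniform(-1, 1, m)
    x = x + eta * step / np.abs(step).sum()   # ||x_{t+1} - x_t||_1 <= eta
p_end, Z_end = gibbs(x)

assert Z_tau / 3 <= Z_end <= 3 * Z_tau        # normalization stability
assert np.all(p_end <= 9 * p_tau) and np.all(p_end >= p_tau / 9)
```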
Next, our computation of the overestimating vector $q$ is parameterized by an integer $k \in [n]$ which will be fixed throughout this section and Section 4.2. We will simply set $q$ to be an upscaled variant of an empirical distribution of roughly $k$ draws from $\mathcal{O}_\tau$.

Lemma 3. Let $k \in [n]$, $\alpha \in (0, 1)$, and suppose $\delta \leq \frac{1}{16k}$. Draw $N = \Theta(k \log \frac{n\eta T}{\alpha})$ samples from $\mathcal{O}_\tau$, for an appropriately large constant, and let $\tilde{q} \in \Delta^n$ be the empirical distribution over these $N$ samples. Define $B := \{i \in [n] \mid \tilde{q}_i \geq \frac{1}{2k}\}$. Then for
$$q_j = \begin{cases} 18\tilde{q}_j & j \in B \\ \frac{18}{k} & j \notin B, \end{cases}$$
with probability $\geq 1 - \frac{\alpha}{2\lceil \eta T \rceil}$, we have $\|q\|_1 = O(\frac{n}{k})$ and $q \geq p_{\tau+s}$ entrywise, for all $s \leq \frac{1}{\eta}$.

Proof. The first conclusion $\|q\|_1 = O(\frac{n}{k})$ is immediate from the definition of $q$, since $\|q\|_1 \leq 18\|\tilde{q}\|_1 + \frac{18n}{k}$. In light of Lemma 2 (which holds deterministically), to show the second conclusion, it suffices to show that with the desired success probability, we have both
$$2\tilde{q}_j \geq [p_\tau]_j \text{ for all } j \in B \quad (2)$$
and
$$\frac{2}{k} \geq [p_\tau]_j \text{ for all } j \notin B. \quad (3)$$
Denote $\alpha' := \frac{\alpha}{2\lceil \eta T \rceil}$ for notational convenience, let $\tilde{p}$ denote the distribution of samples from $\mathcal{O}_\tau$, and recall that $\|\tilde{p} - p_\tau\|_1 \leq \frac{1}{16k}$. Because we are taking $\Theta(k \log \frac{n}{\alpha'})$ samples from $\tilde{p}$, we have by a standard Chernoff bound that with probability at least $1 - \alpha'$ (union bounding over all coordinates $j \in [n]$), both of the following hold.

1. For all $j \in [n]$ such that $\tilde{p}_j \geq \frac{1}{4k}$, we have $\tilde{q}_j \geq \frac{2\tilde{p}_j}{3}$.
2. For all $j \in [n]$ such that $\tilde{p}_j \leq \frac{1}{4k}$, we have $\tilde{q}_j \leq \frac{1}{2k}$.

We condition on these events for the remainder of the proof; we now show (2) and (3) in turn.

Proof of (2). The second event above implies that if $\tilde{p}_j \leq \frac{1}{4k}$, then $j \notin B$. Hence, for all $j \in B$, we have $\tilde{q}_j \geq \frac{2\tilde{p}_j}{3} \geq \frac{[p_\tau]_j}{2}$, since $\|\tilde{p} - p_\tau\|_\infty \leq \frac{1}{16k} \leq \frac{1}{4}\tilde{p}_j$ for all $j \in B$.

Proof of (3). Suppose for contradiction that $j \notin B$ and $[p_\tau]_j > \frac{2}{k}$. This implies that $\tilde{p}_j > \frac{1}{k}$, and hence by the first event above, $\tilde{q}_j \geq \frac{1}{2k}$, contradicting $j \notin B$.

Corollary 1. Assume that Invariants 1 and 2 hold for the phase consisting of iterations $\tau + s$, $s \in [\lceil 1/\eta \rceil]$. We can solve Problem 1 for the phase with probability $\geq 1 - \frac{\alpha}{2\lceil \eta T \rceil}$, and
$$T_{\mathrm{samp}} := O\left(\sqrt{\frac{n}{k}} \cdot T\eta \log^4\left(\frac{mn}{\delta}\right)\right).$$

Proof. We run the algorithm described in the proof of Lemma 3, and condition on it succeeding, giving the failure probability. It then suffices to apply Proposition 2 with $q$ defined as in Lemma 3. For this $q$, we parameterize Proposition 2 with $C = O(1)$ (see Invariant 1), $\rho = O(\frac{n}{k})$ (see Lemma 3), and $\beta = T\eta$. It is clear that the lower bound on entries of $q$ in Proposition 2 holds.
4.2 Maintaining invariants

We now show how to maintain Invariant 1 at iteration $\tau' := \tau + \lceil 1/\eta \rceil$, for use in the next phase, and bound the cost of doing so. We note that Invariant 2 follows immediately from our construction in Corollary 1. First, by combining Lemma 2 with Invariant 1,
$$Z_{\tau'} \in \left[\frac{\widetilde{Z}_{\mathrm{prev}}}{3C}, 3\widetilde{Z}_{\mathrm{prev}}\right]. \quad (4)$$
This suggests that we may use $3\widetilde{Z}_{\mathrm{prev}} = \widetilde{Z}$ for the next phase; however, this would lead to an exponential blowup in the multiplicative range $C$. To sidestep this, we develop a tester for a hidden parameter governing a success probability, which will be used to give a refined estimate $\widetilde{Z}$. We require the following corollary of Proposition 2, whose proof we defer to Appendix B.

Corollary 2. Following the notation of Proposition 2, let $R := \frac{\widetilde{Z}}{Z}$. There is a quantum oracle $\mathcal{O}_{\mathrm{test}}$ which can be implemented, under $T$ Update calls to $x$, in $O(T \log m)$ time, and has query cost
$$O\left(\sqrt{\rho C} \cdot \beta \log^4\left(\frac{Cmn}{\ell\delta}\right)\right).$$
Furthermore, for explicitly known constants $C_\ell$ and $C_u$, $\mathcal{O}_{\mathrm{test}}$ returns "success" with probability $p$ satisfying $\frac{C_\ell}{\sqrt{R\rho}} \leq p \leq \frac{C_u}{\sqrt{R\rho}}$.

Corollary 2 differs from Proposition 2 in that it returns a Boolean-valued answer (as opposed to a sample from an approximate Gibbs distribution), and has a success probability parameterized by explicit constants. We now show how to use Corollary 2 to maintain Invariant 1.

Lemma 4. Assume Invariants 1 and 2 hold for the phase consisting of iterations $\tau + s$, $s \in [\lceil 1/\eta \rceil]$, and suppose $C \geq \frac{4C_u^2}{C_\ell^2}$ for $C = O(1)$, where $C_u$ and $C_\ell$ are the constants from Corollary 2.
Further, suppose we have obtained $q$ satisfying the conclusion of Lemma 3 (i.e., that the algorithm in Lemma 3 succeeded). We can determine $\widetilde{Z}$ such that $\widetilde{Z} \in [Z_{\tau'}, CZ_{\tau'}]$ with probability $\geq 1 - \frac{\alpha}{2\lceil \eta T \rceil}$, in time
$$O\left(\sqrt{\frac{n}{k}} \cdot T\eta \log^4\left(\frac{mn}{\delta}\right) \log\left(\frac{\eta T}{\alpha}\right)\right).$$

Proof. Define $\widetilde{Z}_0 := 3\widetilde{Z}_{\mathrm{prev}}$ and $R_0 := \frac{\widetilde{Z}_0}{Z_{\tau'}}$, and note that $\widetilde{Z}_0 \in [Z_{\tau'}, 9CZ_{\tau'}]$ by Invariant 1 and Lemma 2. Next, assuming the success of Lemma 3, the success probability $p$ of $\mathcal{O}_{\mathrm{test}}$ from Corollary 2 using the estimate $\widetilde{Z}_0$ satisfies (for the unknown $R_0 \in [1, 9C]$, and known $C_\ell$, $C_u$, $\rho$)
$$\frac{C_\ell}{\sqrt{R_0\rho}} \leq p \leq \frac{C_u}{\sqrt{R_0\rho}}.$$
For $N := 27 \log\left(\frac{4\lceil \eta T \rceil}{\alpha}\right) \cdot \frac{3\sqrt{C\rho}}{C_\ell}$, we first run $\mathcal{O}_{\mathrm{test}}$ $N$ times and check the number of successes, denoted $S$, which fits within the runtime budget by Corollary 2. By a Chernoff bound, with probability $\geq 1 - \frac{\alpha}{2\lceil \eta T \rceil}$ we have
$$54 \log\left(\frac{4\lceil \eta T \rceil}{\alpha}\right)\sqrt{\frac{C}{R_0}} \leq \frac{2}{3}pN \leq S \leq \frac{4}{3}pN \leq 108 \log\left(\frac{4\lceil \eta T \rceil}{\alpha}\right)\frac{C_u}{C_\ell}\sqrt{\frac{C}{R_0}}.$$
Hence, we can determine the quantity $R_0$ up to a multiplicative factor of $\frac{4C_u^2}{C_\ell^2} \leq C$, which also implies the same multiplicative approximation factor for $Z_{\tau'}$, as desired.
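The counting step of Lemma 4 can be mirrored classically. The sketch below simulates $\mathcal{O}_{\mathrm{test}}$ as a Bernoulli oracle whose success probability lies in $[\frac{C_\ell}{\sqrt{R_0\rho}}, \frac{C_u}{\sqrt{R_0\rho}}]$ and recovers $R_0$ up to a constant factor from the empirical success count; every numeric value here is an illustrative assumption, not a quantity from the paper.

```python
import numpy as np

rng = np.random.default_rng(1)

# Illustrative stand-ins (assumptions): tester constants, hint quality, range bound.
C_l, C_u, rho, C = 0.5, 1.0, 50.0, 16.0
R0_true = 7.0                                    # hidden ratio Z_0 / Z_tau' to estimate
p = 0.5 * (C_l + C_u) / np.sqrt(R0_true * rho)   # some p in [C_l, C_u] / sqrt(R0 * rho)

log_term = np.log(4 * 20 / 0.1)                  # stands in for log(4*ceil(eta*T)/alpha)
N = int(27 * log_term * 3 * np.sqrt(C * rho) / C_l)

S = rng.binomial(N, p)                           # successes over N runs of the tester
R0_est = C * (81 * log_term / S) ** 2            # invert S ~ 81*log_term*(c/C_l)*sqrt(C/R0)

# R0_est matches R0_true up to roughly a (C_u/C_l)^2 <= C/4 factor, as in Lemma 4.
print(R0_true, R0_est)
```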
4.3 Proof of Theorem 3

Theorem 3. Let $\alpha \in (0, 1)$ and $\delta \leq \eta$. Given a quantum oracle for $A \in \mathbb{R}^{m \times n}$ (defined in Section 2) with $\|A\|_{\max} \leq 1$, we can solve Problem 1 with probability $\geq 1 - \alpha$ with
$$\max(T_{\mathrm{samp}}, T_{\mathrm{update}}) = O\left(1 + \sqrt{n} \cdot T\eta \log^4\left(\frac{mn}{\delta}\right)\left(\sqrt{\eta \log\left(\frac{n\eta T}{\alpha}\right)} + \eta \log\left(\frac{n\eta T}{\alpha}\right)\right)\right),$$
and an additive initialization cost of
$$O\left(\eta^3 T^3 \log^4\left(\frac{n\eta T}{\delta}\right) + \log^7\left(\frac{n\eta T}{\delta}\right)\right).$$

Proof. We first claim that for any $k \in [n]$, we can solve Problem 1 with probability $\geq 1 - \alpha$ and
$$T_{\mathrm{samp}} = O\left(\sqrt{\frac{n}{k}} \cdot T\eta \log^4\left(\frac{mn}{\delta}\right)\right), \quad T_{\mathrm{update}} = O\left(\left(\sqrt{\frac{n}{k}} \cdot T\eta \log^4\left(\frac{mn}{\delta}\right)\right) k\eta \log\left(\frac{n\eta T}{\alpha}\right)\right).$$
This follows from combining Lemma 3 (amortized over $\lceil 1/\eta \rceil$ iterations), Corollary 1, and Lemma 4, and taking a union bound over at most $\lceil \eta T \rceil$ phases. Here we note that the cost of $\log m$ per iteration to support Update costs to $x$ in Lemma 1, Proposition 2, and Corollary 2 is not dominant. By choosing $k = \Theta(\max(1, (\eta \log \frac{mn}{\alpha\epsilon})^{-1}))$, we balance the costs of $T_{\mathrm{samp}}$ and $T_{\mathrm{update}}$, yielding the conclusion. We finally note that by picking an appropriate constant in the definition of $k$, we have $\delta \leq \eta \implies \delta \leq \frac{1}{16k}$, as required by Lemma 3, the only component specifying a bound on $\delta$.
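The balancing step can be illustrated numerically. The sketch below evaluates the two cost expressions above over candidate $k$ (with assumed, illustrative parameter values) and confirms the minimizer of their maximum sits near $k \approx 1/(\eta \log(\cdot))$, in line with the stated choice of $k$.

```python
import numpy as np

# Illustrative parameter values (assumptions, not taken from the paper).
n, m, T = 10_000, 10_000, 400
eta, delta, alpha = 0.05, 1e-3, 0.1

def t_samp(k):
    return np.sqrt(n / k) * T * eta * np.log(m * n / delta) ** 4

def t_update(k):
    return t_samp(k) * k * eta * np.log(n * eta * T / alpha)

ks = np.arange(1, n + 1)
cost = np.maximum(t_samp(ks), t_update(ks))     # per-iteration cost as a function of k
k_star = ks[np.argmin(cost)]

# The crossing point satisfies k * eta * log(...) ~ 1, i.e. k = Theta(max(1, 1/(eta log(...)))).
print(k_star, max(1.0, 1 / (eta * np.log(n * eta * T / alpha))))
```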
Acknowledgments

We thank András Gilyén for communication regarding the prior work [vAG19]. AB was supported in part by the DOE QuantISED grant DE-SC0020360, by the AFOSR under grant FA9550-21-1-0392, and by the U.S. DOE Office of Science under Award Number DE-SC0020266. YG was supported in part by the Stanford MS&E DE&I Research program. YJ was supported in part by a Stanford Graduate Fellowship and a Danzig-Lieberman Graduate Fellowship. AS was supported in part by a Microsoft Research Faculty Fellowship, NSF CAREER Award CCF-1844855, NSF Grant CCF-1955039, a PayPal research award, and a Sloan Research Fellowship. KT thanks Ewin Tang for her expertise on quantum linear algebra and for fielding many of our questions.

References

[Aar15] Scott Aaronson. Read the fine print. Nature Physics, 11(4):291–293, 2015.

[AK07] Sanjeev Arora and Satyen Kale. A combinatorial, primal-dual approach to semidefinite programs. In Proceedings of the Thirty-Ninth Annual ACM Symposium on Theory of Computing, pages 227–236, 2007.

[AL17] Zeyuan Allen-Zhu and Yuanzhi Li. Follow the compressed leader: faster online learning of eigenvectors and faster MMWU. In Proceedings of the 34th International Conference on Machine Learning, ICML 2017, volume 70 of Proceedings of Machine Learning Research, pages 116–125. PMLR, 2017.

[Alm21] Josh Alman. Limits on the universal method for matrix multiplication. Theory of Computing, 17:1–30, 2021.

[AW21] Josh Alman and Virginia Vassilevska Williams. A refined laser method and faster matrix multiplication. In Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms, SODA 2021, pages 522–539. SIAM, 2021.

[BHMT02] Gilles Brassard, Peter Høyer, Michele Mosca, and Alain Tapp. Quantum amplitude amplification and estimation. Quantum Computation and Quantum Information, 305:53–74, 2002.
[BS17] Fernando G. S. L. Brandão and Krysta M. Svore. Quantum speed-ups for solving semidefinite programs. In 58th IEEE Annual Symposium on Foundations of Computer Science, FOCS 2017, pages 415–426. IEEE, 2017.

[Bub15] Sébastien Bubeck. Convex optimization: algorithms and complexity. Foundations and Trends in Machine Learning, 8(3-4):231–357, 2015.

[BWP+17] Jacob Biamonte, Peter Wittek, Nicola Pancotti, Patrick Rebentrost, Nathan Wiebe, and Seth Lloyd. Quantum machine learning. Nature, 549(7671):195–202, 2017.

[CCLW20] Shouvanik Chakrabarti, Andrew M. Childs, Tongyang Li, and Xiaodi Wu. Quantum algorithms and lower bounds for convex optimization. Quantum, 4:221, 2020.

[CDST19] Yair Carmon, John C. Duchi, Aaron Sidford, and Kevin Tian. A rank-1 sketch for matrix multiplicative weights. In Conference on Learning Theory, COLT 2019, volume 99 of Proceedings of Machine Learning Research, pages 589–623. PMLR, 2019.
[CGL+20] Nai-Hui Chia, András Gilyén, Tongyang Li, Han-Hsuan Lin, Ewin Tang, and Chunhao Wang. Sampling-based sublinear low-rank matrix arithmetic framework for dequantizing quantum machine learning. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pages 387–400, 2020.

[CJST19] Yair Carmon, Yujia Jin, Aaron Sidford, and Kevin Tian. Variance reduction for matrix games. In Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, pages 11377–11388, 2019.

[CJST20] Yair Carmon, Yujia Jin, Aaron Sidford, and Kevin Tian. Coordinate methods for matrix games. In 61st IEEE Annual Symposium on Foundations of Computer Science, FOCS 2020, pages 283–293. IEEE, 2020.

[CLS21] Michael B. Cohen, Yin Tat Lee, and Zhao Song. Solving linear programs in the current matrix multiplication time. Journal of the ACM, 68(1):1–39, 2021.

[DH96] Christoph Dürr and Peter Høyer. A quantum algorithm for finding the minimum. CoRR, quant-ph/9607014, 1996.
[DNV20] Daniel Dadush, Bento Natura, and László A. Végh. Revisiting Tardos's framework for linear programming: faster exact solutions using approximate solvers. In 61st IEEE Annual Symposium on Foundations of Computer Science, FOCS 2020, pages 931–942. IEEE, 2020.

[GAW19] András Gilyén, Srinivasan Arunachalam, and Nathan Wiebe. Optimizing quantum optimization algorithms via faster quantum gradient computation. In Proceedings of the Thirtieth Annual ACM-SIAM Symposium on Discrete Algorithms, pages 1425–1444. SIAM, 2019.

[GK95] Michael D. Grigoriadis and Leonid G. Khachiyan. A sublinear-time randomized approximation algorithm for matrix games. Operations Research Letters, 18(2):53–58, 1995.

[GLG22] Sevag Gharibian and François Le Gall. Dequantizing the quantum singular value transformation: hardness and applications to quantum chemistry and the quantum PCP conjecture. In Proceedings of the 54th Annual ACM SIGACT Symposium on Theory of Computing, pages 19–32, 2022.
[GR02] Lov Grover and Terry Rudolph. Creating superpositions that correspond to efficiently integrable probability distributions. CoRR, abs/quant-ph/0208112, 2002.

[GSLW19] András Gilyén, Yuan Su, Guang Hao Low, and Nathan Wiebe. Quantum singular value transformation and beyond: exponential improvements for quantum matrix arithmetics. In Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing, STOC 2019, pages 193–204. ACM, 2019.

[Haa19] Jeongwan Haah. Product decomposition of periodic functions in quantum signal processing. Quantum, 3:190, 2019.

[HHL09] Aram W. Harrow, Avinatan Hassidim, and Seth Lloyd. Quantum algorithm for linear systems of equations. Physical Review Letters, 103(15):150502, 2009.

[Jor05] Stephen P. Jordan. Fast quantum algorithm for numerical gradient estimation. Physical Review Letters, 95(5):050501, 2005.

[JSWZ21] Shunhua Jiang, Zhao Song, Omri Weinstein, and Hengjie Zhang. A faster algorithm for solving general LPs. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing, STOC 2021, pages 823–832, 2021.
[Kar84] Narendra Karmarkar. A new polynomial-time algorithm for linear programming. In Proceedings of the Sixteenth Annual ACM Symposium on Theory of Computing, pages 302–311, 1984.

[KP16] Iordanis Kerenidis and Anupam Prakash. Quantum recommendation systems. arXiv preprint arXiv:1603.08675, 2016.

[KP20] Iordanis Kerenidis and Anupam Prakash. A quantum interior point method for LPs and SDPs. ACM Transactions on Quantum Computing, 1(1):1–32, 2020.

[LCW19] Tongyang Li, Shouvanik Chakrabarti, and Xiaodi Wu. Sublinear quantum algorithms for training linear and kernel-based classifiers. In International Conference on Machine Learning, pages 3815–3824. PMLR, 2019.

[LMR14] Seth Lloyd, Masoud Mohseni, and Patrick Rebentrost. Quantum principal component analysis. Nature Physics, 10(9):631–633, 2014.

[LS14] Yin Tat Lee and Aaron Sidford. Path finding methods for linear programming: solving linear programs in Õ(√rank) iterations and faster algorithms for maximum flow. In 2014 IEEE 55th Annual Symposium on Foundations of Computer Science, pages 424–433. IEEE, 2014.
[LS19] Yin Tat Lee and Aaron Sidford. Solving linear programs with sqrt(rank) linear system solves. arXiv preprint arXiv:1910.08033, 2019.

[LWCW21] Tongyang Li, Chunhao Wang, Shouvanik Chakrabarti, and Xiaodi Wu. Sublinear classical and quantum algorithms for general matrix games. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 8465–8473, 2021.

[Nem04] Arkadi Nemirovski. Prox-method with rate of convergence O(1/t) for variational inequalities with Lipschitz continuous monotone operators and smooth convex-concave saddle point problems. SIAM Journal on Optimization, 15(1):229–251, 2004.

[Nes07] Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2-3):319–344, 2007.

[Neu28] John von Neumann. Zur Theorie der Gesellschaftsspiele. Mathematische Annalen, 100:295–320, 1928.
[NJLS09] Arkadi Nemirovski, Anatoli B. Juditsky, Guanghui Lan, and Alexander Shapiro. Robust stochastic approximation approach to stochastic programming. SIAM Journal on Optimization, 19(4):1574–1609, 2009.

[Ren88] James Renegar. A polynomial-time algorithm, based on Newton's method, for linear programming. Mathematical Programming, 40(1):59–93, 1988.

[RML14] Patrick Rebentrost, Masoud Mohseni, and Seth Lloyd. Quantum support vector machine for big data classification. Physical Review Letters, 113(13):130503, 2014.

[SV14] Sushant Sachdeva and Nisheeth K. Vishnoi. Faster algorithms via approximation theory. Foundations and Trends in Theoretical Computer Science, 9(2):125–210, 2014.
[Tan19] Ewin Tang. A quantum-inspired classical algorithm for recommendation systems. In Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing, pages 217–228, 2019.

[vAG19] Joran van Apeldoorn and András Gilyén. Quantum algorithms for zero-sum games. CoRR, abs/1904.03180, 2019.

[vAGGdW20a] Joran van Apeldoorn, András Gilyén, Sander Gribling, and Ronald de Wolf. Convex optimization using quantum oracles. Quantum, 4:220, 2020.

[vAGGdW20b] Joran van Apeldoorn, András Gilyén, Sander Gribling, and Ronald de Wolf. Quantum SDP-solvers: better upper and lower bounds. Quantum, 4:230, 2020.

[vdB20] Jan van den Brand. A deterministic linear program solver in current matrix multiplication time. In Proceedings of the Thirty-First Annual ACM-SIAM Symposium on Discrete Algorithms, SODA 2020, pages 259–278, 2020.

[vdBLL+21] Jan van den Brand, Yin Tat Lee, Yang P. Liu, Thatchaphol Saranurak, Aaron Sidford, Zhao Song, and Di Wang. Minimum cost flows, MDPs, and ℓ1-regression in nearly linear time for dense instances. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing, STOC 2021, pages 859–869, 2021.
[vdBLSS20] Jan van den Brand, Yin Tat Lee, Aaron Sidford, and Zhao Song. Solving tall dense linear programs in nearly linear time. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pages 775–788, 2020.

A Solving matrix games with a Gibbs sampling oracle

In this section, we prove Proposition 1, which shows how to solve a zero-sum matrix game using an approximate Gibbs sampling oracle (via Algorithm 1). To briefly motivate the algorithm we use and our proof of its guarantees, we recall that the problem we consider is of the form
$$\min_{v \in \Delta^n} \max_{u \in \Delta^m} f(u, v) := u^\top A v, \text{ where } \|A\|_{\max} \leq 1, \quad (5)$$
and we define the associated gradient operator as
$$g(u, v) = (-Av, A^\top u). \quad (6)$$
Taking (stochastic) mirror descent steps on the gradient operator of (5) is well-known to yield an approximate NE to the matrix game [Bub15]. We show that an approximate implementation of this strategy, combined with appropriate subsampling, efficiently yields an approximate NE.
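For intuition, the following is a minimal classical sketch of this strategy. It substitutes exact sampling from the Gibbs distributions for the approximate oracle of Problem 1 and uses dense matrix-vector products, so it illustrates only the form of the iteration, not the sublinear costs claimed here; the step size and iteration count follow the settings of Proposition 3 below.

```python
import numpy as np

def matrix_game_smd(A, eps, rng=np.random.default_rng(0)):
    """Sketch of stochastic entropic mirror descent for min_v max_u u^T A v."""
    m, n = A.shape
    eta = eps / 15                                    # step size as in Proposition 3
    T = int(np.ceil(6 * np.log(m * n) / (eta * eps)))
    x, y = np.zeros(m), np.zeros(n)                   # dual iterates defining u_t, v_t
    u_bar, v_bar = np.zeros(m), np.zeros(n)
    for _ in range(T):
        ay = A @ y
        u = np.exp(ay - ay.max()); u /= u.sum()       # u_t proportional to exp(A y_t)
        ax = -(A.T @ x)
        v = np.exp(ax - ax.max()); v /= v.sum()       # v_t proportional to exp(-A^T x_t)
        i = rng.choice(m, p=u)                        # row index i_t ~ u_t
        j = rng.choice(n, p=v)                        # column index j_t ~ v_t
        x[i] += eta                                   # the KL-prox steps reduce to these
        y[j] += eta                                   # multiplicative-weights updates
        u_bar += u / T; v_bar += v / T
    return u_bar, v_bar                               # averaged iterates (u_bar, v_bar)
```

On a small random $A$ with $\|A\|_{\max} \leq 1$, the gap $\max_u u^\top A\bar{v} - \min_v \bar{u}^\top Av$ of the returned pair should be on the order of $\epsilon$, in line with (7) below.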
We begin by making the following observation.

Lemma 5. Let $u, \tilde{u} \in \Delta^m$ satisfy $\|u - \tilde{u}\|_1 \leq \delta$. Let $\tilde{g} := A_{i:}$ where $i \sim \tilde{u}$, and $g := A^\top u$. Then $\|g - \mathbb{E}\tilde{g}\|_\infty \leq \delta$.

Proof. Note that $\mathbb{E}\tilde{g} = A^\top \tilde{u}$, and $\|A^\top(u - \tilde{u})\|_\infty \leq \|u - \tilde{u}\|_1 \leq \delta$ since $\|A\|_{\max} \leq 1$.

We next present a variant of the classical mirror descent analysis, which bounds the expected approximation quality of iterates of Algorithm 1 prior to subsampling.

Proposition 3. Let $\delta \leq \frac{\epsilon}{20}$, $\eta = \frac{\epsilon}{15}$, and $T \geq \frac{6\log(mn)}{\eta\epsilon}$ in Algorithm 1. Let the iterates of Algorithm 1 be $\{x_t, y_t\}_{t=0}^{T-1}$, and denote
$$u_t := \frac{\exp(Ay_t)}{\|\exp(Ay_t)\|_1}, \quad v_t := \frac{\exp(-A^\top x_t)}{\|\exp(-A^\top x_t)\|_1} \quad \text{for all } 0 \leq t < T.$$
For $(\bar{u}, \bar{v}) := \frac{1}{T}\sum_{t=0}^{T-1}(u_t, v_t)$, we have
$$\mathbb{E}\left[\max_{u \in \Delta^m} u^\top A\bar{v} - \min_{v \in \Delta^n} \bar{u}^\top Av\right] \leq \epsilon. \quad (7)$$

Proof. By definition of the updates, at every iteration $0 \leq t \leq T - 1$, we have
$$u_{t+1} = \operatorname*{argmin}_{u \in \Delta^m}\left\{\eta\langle -A_{:j_t}, u\rangle + \sum_{i \in [m]} [u]_i \log\frac{[u]_i}{[u_t]_i}\right\}, \quad v_{t+1} = \operatorname*{argmin}_{v \in \Delta^n}\left\{\eta\langle A_{i_t:}, v\rangle + \sum_{j \in [n]} [v]_j \log\frac{[v]_j}{[v_t]_j}\right\}.$$
Consequently, by the optimality conditions of ut+1 and vt+1 respectively, we have for any u ∈ ∆m, v ∈ ∆n, and letting Vx(x′) := Σ_k [x′]k log([x′]k/[x]k) be the KL divergence between simplex variables of appropriate dimension,

⟨−A:jt, ut − u⟩ + ⟨Ait:, vt − v⟩
  ≤ (1/η)(Vut(u) − Vut+1(u) + Vvt(v) − Vvt+1(v)) + (⟨−A:jt, ut − ut+1⟩ − (1/η)Vut(ut+1)) + (⟨Ait:, vt − vt+1⟩ − (1/η)Vvt(vt+1))
  ≤ (1/η)(Vut(u) − Vut+1(u) + Vvt(v) − Vvt+1(v)) + (η/2)∥A:jt∥∞² + (η/2)∥Ait:∥∞²,  (8)

where for the last inequality we use Hölder's inequality and the fact that V is 1-strongly convex in the ℓ1 norm (by Pinsker's inequality). Averaging the above for 0 ≤ t < T, and denoting wt := (ut, vt) and g̃t := (−A:jt, Ait:), we obtain for any w = (u, v) ∈ ∆m × ∆n,

(1/T) Σ_{t=0}^{T−1} ⟨g̃t, wt − w⟩ ≤ (1/(ηT))(Vu0(u) + Vv0(v)) + η.  (9)

In the above, we further recalled the bound ∥A∥max ≤ 1 by assumption. In order to bound the deviation of the left-hand side from its expectation, we use a "ghost iterate" argument following [NJLS09, CJST19]. In particular, we define iterates ũt, ṽt as follows: let ũ0 ← u0, ṽ0 ← v0, and then for each 0 ≤ t < T, define

ũt+1 := argmin_{u∈∆m} { η⟨−Avt + A:jt, u⟩ + Σ_{i∈[m]} [u]i log([u]i/[ũt]i) },
ṽt+1 := argmin_{v∈∆n} { η⟨A⊤ut − Ait:, v⟩ + Σ_{j∈[n]} [v]j log([v]j/[ṽt]j) },

where i, j above are the same coordinates as were used in defining the updates to ut+1 and vt+1. By an analogous bound to (8), where we note ∥A:jt − Avt∥∞, ∥A⊤ut − Ait:∥∞ ≤ 2,

⟨−Avt + A:jt, ũt − u⟩ + ⟨A⊤ut − Ait:, ṽt − v⟩ ≤ (1/η)(Vũt(u) − Vũt+1(u) + Vṽt(v) − Vṽt+1(v)) + 4η.

Averaging the above for 0 ≤ t < T, and denoting w̃t := (ũt, ṽt) and gt := g(wt) (see (5)), we obtain for any w = (u, v) ∈ ∆m × ∆n,

(1/T) Σ_{t=0}^{T−1} ⟨gt − g̃t, w̃t − w⟩ ≤ (1/(ηT))(Vu0(u) + Vv0(v)) + 4η.  (10)

Summing inequalities (9) and (10), and maximizing over w = (u, v) ∈ ∆m × ∆n, we have

max_{w∈∆m×∆n} (1/T) Σ_{t=0}^{T−1} ⟨gt, wt − w⟩ ≤ max_{u∈∆m, v∈∆n} (2/(ηT))(Vu0(u) + Vv0(v)) + 5η + (1/T) Σ_{t=0}^{T−1} ⟨gt − g̃t, wt − w̃t⟩.  (11)

Taking expectations over the above, we have

E[ max_{w∈∆m×∆n} (1/T) Σ_{t=0}^{T−1} ⟨gt, wt − w⟩ ]
  ≤ max_{u∈∆m, v∈∆n} (2/(ηT))(Vu0(u) + Vv0(v)) + 5η + E[ (1/T) Σ_{t=0}^{T−1} ⟨gt − g̃t, wt − w̃t⟩ ]
  (i) ≤ 2 log(mn)/(ηT) + 5η + (1/T) Σ_{t=0}^{T−1} ⟨gt − Eg̃t, wt − w̃t⟩
  (ii) ≤ 2 log(mn)/(ηT) + 5η + 4δ
  (iii) ≤ ǫ.

In the above, (i) used the diameter bound of the KL divergence from the uniform distribution, i.e. max_{u∈∆m} Vu0(u) = log m (and a similar bound for Vv0(v)). Further, (ii) uses that g̃t is conditionally independent of wt and w̃t, the assumption on the Gibbs sampler giving ∥gt − Eg̃t∥∞ ≤ δ (via Lemma 5), and Hölder; and (iii) uses our choices of T, η and δ. Finally, we note that the desired claim follows by linearity: for any w = (u, v),

(1/T) Σ_{t=0}^{T−1} ⟨gt, wt − w⟩ = ⟨ g((1/T) Σ_{t=0}^{T−1} wt), (1/T) Σ_{t=0}^{T−1} wt − w ⟩ = u⊤Av̄ − ū⊤Av.

By using a simple martingale argument (inspired by those in [AL17, CDST19]) to bound the error term in (11), we show that the guarantee of Proposition 3 holds with high probability.

Corollary 3. Let α ∈ (0, 1), and let δ ≤ ǫ/20, η = ǫ/20 and T ≥ 8 log(mn)/(ηǫ) + 2048 log(1/α)/ǫ² in Algorithm 1.
Then with probability at least 1 − α, following notation of Proposition 3, (ū, v̄) are an ǫ-approximate NE for A.

Proof. Consider the filtration given by Ft = σ(u0, v0, g̃0, · · · , g̃t, ut+1, vt+1). We will bound the terms Σ_{t=0}^{T−1} ⟨gt − g̃t, wt − w̃t⟩ in (11). To do so, we define a martingale difference sequence of the form

Dt := ⟨gt − g̃t, wt − w̃t⟩ − ⟨gt − E[g̃t | Ft−1], wt − w̃t⟩,

which is adapted to the filtration Ft. We first note that Dt ≤ ∥gt−1 − g̃t−1∥∞ ∥wt−1 − w̃t−1∥1 ≤ 8 with probability 1. Consequently, applying the Azuma-Hoeffding inequality yields

Σ_{t=0}^{T−1} Dt ≤ √(128 T log(1/α)) with probability ≥ 1 − α.

Plugging this back into (11) and using the KL divergence range bound, Lemma 5 with our definition of Ogibbs, and the choices of parameters, we thus have with probability 1 − α,

max_{w∈∆m×∆n} (1/T) Σ_{t=0}^{T−1} ⟨gt, wt − w⟩ ≤ 2 log(mn)/(ηT) + 5η + 4δ + √(128 log(1/α)/T) ≤ ǫ.  (12)

The remainder of the proof follows analogously to Proposition 3.

The Gibbs sampling oracles implicitly maintain access to ut ∝ exp(Ayt) and vt ∝ exp(−A⊤xt), which by averaging gives (ū, v̄) = (1/T) Σ_{t=0}^{T−1} (ut, vt) as one approximate equilibrium as guaranteed in Corollary 3. To turn the implicitly maintained iterates into an actual classical output, we subsample the iterates. Below we formally show one can take the empirical average of independent samples from distributions close to ū and v̄ to also obtain an approximate equilibrium (with the same approximation factor up to constant factors) with high probability.

Lemma 6. Suppose ū = (1/T) Σ_{t=0}^{T−1} ut for {ut}_{t=0}^{T−1} ⊂ ∆m and v̄ = (1/T) Σ_{t=0}^{T−1} vt for {vt}_{t=0}^{T−1} ⊂ ∆n are an ǫ-approximate NE for A. Further suppose that for some δ ∈ (0, 1), {ũt}_{t=0}^{T−1} ⊂ ∆m, {ṽt}_{t=0}^{T−1} ⊂ ∆n, and for all 0 ≤ t < T we have ∥ũt − ut∥1 ≤ δ and ∥ṽt − vt∥1 ≤ δ. Let û = (1/T) Σ_{t=0}^{T−1} e_{it}, where each e_{it} ∈ Rm is sampled independently according to ũt; similarly, let v̂ = (1/T) Σ_{t=0}^{T−1} e_{jt}, where each e_{jt} ∈ Rn is sampled independently according to ṽt. Suppose T ≥ 16 log(mn/α)/ǫ². Then with probability at least 1 − α, (û, v̂) are a (2ǫ + 2δ)-approximate NE for A.

Proof. First, let ũavg = (1/T) Σ_{t=0}^{T−1} ũt and ṽavg = (1/T) Σ_{t=0}^{T−1} ṽt. By convexity of norms, we have ∥ũavg − ū∥1 ≤ δ and ∥ṽavg − v̄∥1 ≤ δ, and hence under the NE approximation guarantee of (ū, v̄) and Hölder's inequality,

max_{u∈∆m} u⊤Aṽavg − min_{v∈∆n} ũavg⊤Av ≤ ǫ + 2δ.

Let z be a fixed vector in [−1, 1]^n. By Hoeffding's inequality, since each random variable ⟨z, e_{jt}⟩ lies in the range [−1, 1] and Ev̂ = ṽavg, we have that

Pr[ |⟨z, v̂ − ṽavg⟩| ≥ ǫ/2 ] ≤ 2 exp(−Tǫ²/8) ≤ α/(m + n).  (13)

Next, note that max_{u∈∆m} u⊤Aṽavg is achieved by a basis vector u = ei. Hence, applying a union bound over (13) for all z = Ai: shows that with probability at least 1 − αm/(m + n),

max_{u∈∆m} u⊤Av̂ ≤ max_{u∈∆m} u⊤Aṽavg + ǫ/2.

By symmetry, with probability at least 1 − αn/(m + n),

min_{v∈∆n} û⊤Av ≥ min_{v∈∆n} ũavg⊤Av − ǫ/2.

The conclusion follows from a union bound, and combining the above three displays.

Finally, we put these pieces together to give a complete guarantee.
Proposition 1. Let A ∈ Rm×n satisfy ∥A∥max ≤ 1 and ǫ, α ∈ (0, 1). Let δ ≤ ǫ/20, η = ǫ/60, and T = Θ(ǫ−2 log(mn/α)) for an appropriate constant. With probability ≥ 1 − α, Algorithm 1 outputs an ǫ-approximate NE for A.

Proof. We follow notation of Proposition 3. By applying Corollary 3 (up to constant factors), we have that with probability at least 1 − α/2, ū := (1/T) Σ_{t=0}^{T−1} ut and v̄ := (1/T) Σ_{t=0}^{T−1} vt satisfy

max_{u∈∆m} u⊤Av̄ − min_{v∈∆n} ū⊤Av ≤ ǫ/3.

Finally, Lemma 6 (with failure probability α/2) and a union bound yields the desired conclusion.

B Quantum rejection sampling with a hint

In this section, we prove Proposition 2, which gives a dynamic quantum rejection sampling subroutine and bounds its cost of implementation. Our result is an extension of analogous developments in [vAG19], but is stated more generally to allow for the use of an appropriate "hint" vector in the rejection sampling procedure. We build up to our main result in several pieces.

Amplitude amplification. First, for a quantum decision algorithm which applies a unitary U and then measures, yielding an accepting state with probability α, quantum amplification [BHMT02] shows we can apply U ≈ α^{−1/2} times to obtain an accepting state with high probability.

Proposition 4 (Theorem 3, [BHMT02]). Let S ⊆ {0, 1}^s, let U be an s-qubit quantum oracle, and let α be the probability that measuring the result of applying U yields an accepting state. There is a (quantum) algorithm using O(α^{−1/2} log(1/δ)) queries to U and O(log s log(1/δ)) additional time that returns s with s ∈ S with probability ≥ 1 − δ.

Loading from trees. Given a dynamic vector x ∈ Rm≥0 which is supported in an appropriate efficient data structure SamplerTree (see Lemma 1), and a known bound β ≥ ∥x∥1, we recall a result of [GR02] which allows us to form a superposition of the entries of x (suitably rescaled).

Lemma 7. Let x ∈ Rm≥0 correspond to an instance of SamplerTree, and β ≥ ∥x∥1. We can maintain a quantum oracle OSamplerTree which takes O(log m) time to apply, such that the total cost of building OSamplerTree after T calls to Update is O(T log m), and

OSamplerTree |0⟩^⊗(a+1) = Σ_{i∈[m]} √(xi/β) |0⟩|i⟩ + √(1 − ∥x∥1/β) |1⟩|g⟩.

Proof. This is implicit in [GR02]. We first apply a 1-qubit gate to condition on selecting from the tree (with probability ∥x∥1/β), and then apply the [GR02] procedure conditioned on the first qubit being |0⟩, which controls for one qubit at a time while propagating subtree sums (provided by SamplerTree via SubtreeSum). The cost to build the circuit follows because on an Update we need to change the gates corresponding to the relevant leaf-to-root path.

Corollary 4. Let x ∈ Rm≥0 correspond to an instance of SamplerTree, let β ≥ ∥x∥1, and suppose A ∈ Rm×n has ∥A∥max ≤ 1. We can maintain a quantum oracle OA⊤x which takes O(log m) time to apply, with total building cost O(T log m) after T calls to Update, such that for any j ∈ [n],

OA⊤x |0⟩^⊗(a+2) |j⟩ = |0⟩ ( Σ_{i∈[m]} √(Aij xi/β) |0⟩|i⟩ + |1⟩|g⟩ ) |j⟩.

Proof.
We apply O′A (see Section 2) to the output of OSamplerTree, ignoring the additional qubit.

We remark here that the additional qubit in Corollary 4 will shortly become useful in constructing an appropriate block-encoding of a scaling of diag(A⊤x).

Polynomial approximation. In order to give approximate Gibbs samplers for the types of dynamic vectors Algorithm 1 encounters, we further require some tools from polynomial approximation theory. We first state a helper result on boundedly approximating the exponential, a variant of which was also used in [vAG19]. We provide a proof in Appendix C.

Lemma 8 (Lemma 7, [vAG19]). Let β ≥ 1, ξ ≤ 1/10. There is a polynomial Pβ,ξ of degree O(β log(1/ξ)) such that max_{x∈[−1,1]} |Pβ,ξ(x)| ≤ 3 and max_{x∈[−1,0]} |Pβ,ξ(x) − exp(βx)| ≤ ξ.

Next, we state a further corollary of Lemma 8 to be used in our rejection sampler.

Corollary 5. Let B, δ ≥ 0 and suppose v ∈ Rn has ∥v∥∞ ≤ B. Further, suppose for some c ≥ 0, −c ≤ max_{j∈[n]} vj ≤ 0. Let q ∈ Rn≥0 satisfy qj ∈ [ℓ, 1] entrywise. Finally, define uj := vj/(2B) entrywise. There is a degree-∆ polynomial P, for ∆ = O(B · (c + log(n/(ℓδ)))), such that for wj := P(uj)² qj and zj := exp(2Buj) qj entrywise,

∥ w/∥w∥1 − z/∥z∥1 ∥1 ≤ δ.  (14)

Moreover, max_{x∈[−1,1]} |P(x)| ≤ 1/2, and ∥w∥1 ≥ ((1 − δ)/36) ∥z∥1.

Proof. Assume δ ≤ 2, else the statement is clearly true. First, uj ∈ [−1/2, 0] entrywise by the stated assumptions (since vj ∈ [−B, 0] entrywise). Let Pβ,ξ(·) be the polynomial given by Lemma 8 which ξ-approximates exp(β·) on [−1/2, 0]. We define

P(u) := (1/6) PB,ξ(u), for ξ := δℓ/(6n exp(c)).

The degree bound and absolute value bound of this polynomial follow immediately from Lemma 8, so it remains to show the distance bound. The guarantees of Lemma 8 imply for all j ∈ [n],

|6P(uj) − exp(Buj)| ≤ ξ.  (15)

We further have that uj ≤ 0, so exp(Buj) ≤ 1. Hence, we also have |6P(uj) + exp(Buj)| ≤ 2 + ξ ≤ 3. Combining yields for all j ∈ [n],

|36P(uj)² − exp(2Buj)| ≤ 3ξ.  (16)

Next, let yj := 36wj for all j ∈ [n], and note that y/∥y∥1 = w/∥w∥1. We bound

∥ w/∥w∥1 − z/∥z∥1 ∥1 = Σ_{j∈[n]} |yj/∥y∥1 − zj/∥z∥1| ≤ Σ_{j∈[n]} |yj/∥y∥1 − yj/∥z∥1| + Σ_{j∈[n]} |yj/∥z∥1 − zj/∥z∥1| ≤ |1 − ∥y∥1/∥z∥1| + ∥y − z∥1/∥z∥1 ≤ 2 ∥y − z∥1/∥z∥1.  (17)

By using the definitions of y, z and (16), as well as the assumed ranges on q,

∥y − z∥1 ≤ 3nξ, ∥z∥1 ≥ ℓ exp(−c).

The second inequality used that some vj = 2Buj is at least −c by assumption. Combining the above display with (17) and the definition of ξ concludes the proof of (14).
Finally, using the bounds on ∥y − z∥1 and ∥z∥1 above shows that ∥w∥1 = (1/36)∥y∥1 ≥ ((1 − δ)/36) ∥z∥1.

Block-encoding. Our approximate Gibbs oracle follows an implementation strategy pioneered by [GSLW19] termed "block-encoding." Specifically, we follow [GSLW19] and say that U, an (a + ℓ)-qubit quantum gate, is an ℓ-bit block-encoding of M if the top-left 2^a × 2^a submatrix of U is M. Block-encoded matrices admit efficient composable operations, such as the application of linear combinations and bounded polynomials. We summarize these properties in the following.

Proposition 5 (Lemma 52, [GSLW19]). Let U1 and U2 be ℓ-bit block-encodings of M1, M2 of the same size. There is an O(ℓ)-bit block-encoding of (1/2)M1 + (1/2)M2 which takes the same asymptotic time to apply as applying U1 and U2.

Proposition 6 (Theorem 56, [GSLW19]). Let U be an ℓ-bit block-encoding of M, and P : [−1, 1] → [−1/2, 1/2] be a degree-∆ polynomial. There is an O(ℓ)-bit block-encoding of P(M) which can be applied in O(∆) applications of U and U† and O(ℓ∆) additional time.

We also demonstrate that an application of Corollary 4 yields a simple block-encoding of diag(A⊤x/β). A similar construction previously appeared in [vAG19].

Corollary 6. Let x ∈ Rm≥0 correspond to an instance of SamplerTree, and β ≥ ∥x∥1. Let M := diag(A⊤x/β) and U := O∗_{A⊤x}(SWAP12 ⊗ I)O_{A⊤x}, where SWAP12 swaps the first two qubits and O_{A⊤x} is from Corollary 4. Then U is a block-encoding of M, and can be applied in time O(log m), with total building cost O(T log m) after T calls to Update.

Proof. Define wij := Aij xi/β for convenience. By the definition of O_{A⊤x}, we have that

(SWAP12 ⊗ I) O_{A⊤x} ( |0⟩^⊗(a+2)|j⟩ ) = ( |00⟩ Σ_{i∈[m]} √wij |i⟩ + |10⟩|g⟩ ) |j⟩.

Hence, for j, j′ ∈ [n], we compute ⟨j′|⟨0|^⊗(a+2) U |0⟩^⊗(a+2)|j⟩ as

⟨j′| ( |00⟩ Σ_{i∈[m]} √wij′ |i⟩ + |01⟩|g⟩ )∗ ( |00⟩ Σ_{i∈[m]} √wij |i⟩ + |10⟩|g⟩ ) |j⟩ = Σ_{i∈[m]} wij = [A⊤x]j/β if j = j′, and 0 if j ≠ j′.

In particular the |01⟩ and |10⟩ terms disappear, and |j⟩, |j′⟩ are orthogonal unless j = j′. In the above, we required that √wij · √wij = wij, which is only true if wij is nonnegative. To bypass this issue, we will implement the two copies of O_{A⊤x} in slightly different ways, to obtain the correct signing. For notational clarity, we let OL be the oracle which is conjugated on the left and OR be the oracle on the right, such that U = (OL)∗(SWAP12 ⊗ I)(OR). Note that x is entrywise nonnegative and β > 0, and hence the only factor determining the sign of wij is Aij. When Aij ≥ 0, we will define the oracles O′A used to load √Aij for OL and OR in a consistent way (i.e. use the same-signed square root), so that √wij · √wij = wij. When Aij < 0 we will define them in an inconsistent way, so that after the conjugation operation, −√wij · √wij = wij.
We have thus shown that ⟨0|^⊗(a+2) U |0⟩^⊗(a+2) = M, which implies the first conclusion. To see the second, all our gates are reversible (arithmetic circuits are reversible, and OA is its own inverse), and hence the complexity of applying O∗_{A⊤x} is the same as that of O_{A⊤x}.

Finally, we put together the pieces and prove Proposition 2, which we use repeatedly throughout the paper to implement our Gibbs sampling oracles.

Proposition 2. Let x ∈ Rm≥0 correspond to an instance of SamplerTree, and β ≥ ∥x∥1. Let p be the Gibbs distribution associated with A⊤x, let Z := Σ_{j∈[n]} exp([A⊤x]j) and Z̃ ∈ [Z, CZ] for some C ≥ 1. Finally, let q ∈ Rn have entries classically queriable in O(1) time, satisfy q ≥ p entrywise, qj ∈ [δ/n, 1] for all j ∈ [n], and ∥q∥1 = ρ. Suppose Z̃, C, ρ, and β are explicitly known. Given a quantum oracle for A ∈ Rm×n (defined in Section 2) with ∥A∥max ≤ 1, we can implement a δ-approximate Gibbs oracle which has query cost O(√(ρC) · β log⁴(Cmn/δ)). The total additional cost incurred if x undergoes T Update calls which preserve the invariants on Z̃, C, ρ, β is O(T log m).

Proof. Throughout the proof, let δ ← min(1/2, δ) and B := 4(β + log(Cn/δ)). Also define ℓ := δ/n (following notation of Corollary 5). We first observe that since max_{j∈[n]}[A⊤x]j ≤ log Z ≤ max_{j∈[n]}[A⊤x]j + log n,

−log(Cn) ≤ max_{j∈[n]} ([A⊤x]j − log(Z̃ qj)) ≤ 0.

Here, the upper bound used that for all j ∈ [n], exp([A⊤x]j)/(Z̃ qj) = (pj/qj) · (Z/Z̃) ≤ 1 by assumption. Hence, for v := A⊤x − log(Z̃ q) entrywise,

−c ≤ max_{j∈[n]} vj ≤ 0 for c := log(Cn).

Next, we note log(Z̃ q) is entrywise bounded in magnitude by B/2:

log(Z̃ qj) ≤ log(CZ) ≤ log( n · max_{j∈[n]} exp([A⊤x]j) ) + log C ≤ B/2,
log(Z̃ qj) ≥ log Z + log(δ/n) ≥ min_{j∈[n]} [A⊤x]j − log(n/δ) ≥ −B/2.

Define M1 := diag(A⊤x/(2B)) and M2 := diag(−(1/(2B)) log(Z̃ q)). By the calculations above, we have ∥M2∥op ≤ 1/2, and similarly it is clear that ∥M1∥op ≤ 1/2 because ∥A⊤x∥∞ ≤ β. Moreover, by using Corollary 6 with β ← B, we obtain U1, a block-encoding of M1 applicable in O(log m) time. Using a similar construction as Corollary 6, since q, B, and Z̃ are all efficiently classically queriable, we obtain U2, a block-encoding of M2 applicable in O(1) time. Hence, Proposition 5 yields U, a block-encoding of M1 + M2 = diag(v/(2B)), which can be applied in O(log mn) time.

Next, let P be the degree-∆ = O(B log(Cn/δ)) polynomial from Corollary 5, parameterized by B, v, c, q, ℓ as defined earlier. Corollary 5 shows that P : [−1, 1] → [−1/2, 1/2]. Thus, Proposition 6 yields U′, a block-encoding of diag(P(v/(2B))), which can be applied in O(∆ · log mn) time. Furthermore, since q and ρ are efficiently classically queriable, we can define a gate Oq which is applicable in O(1) time and acts as

Oq |0⟩^⊗(b+1) = |0⟩ Σ_{j∈[n]} √(qj/ρ) |j⟩ + |1⟩|g⟩.

Applying U′ to the output of Oq with appropriate ancilla qubits then yields

|0⟩^⊗O(1) Σ_{j∈[n]} √(qj P(uj)²/ρ) |j⟩|gj⟩ + |g′⟩, where uj := vj/(2B) for all j ∈ [n].

Post-selecting on the first register being the all-zeroes state and measuring on the register corresponding to j, we see that we obtain a sample j ∈ [n] with probability proportional to qj P(uj)².
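For intuition, the classical analogue of this step is ordinary rejection sampling against the hint q. The sketch below is our own illustration and is not part of the construction: it uses exact exponentials in place of the bounded polynomial P and ignores the block-encoding machinery entirely.

import numpy as np

def hinted_gibbs_sample(Ax, q, Z_tilde, rng=None):
    """Classical rejection sampling against the hint q (illustration only):
    propose j ~ q / ||q||_1 and accept with probability exp(v_j), where
    v_j = [A^T x]_j - log(Z_tilde * q_j) <= 0 under the assumptions on q and Z_tilde.
    Accepted indices follow the Gibbs distribution proportional to exp(A^T x)."""
    if rng is None:
        rng = np.random.default_rng()
    rho = q.sum()
    while True:
        j = rng.choice(len(q), p=q / rho)
        if rng.random() < np.exp(Ax[j] - np.log(Z_tilde * q[j])):
            return j

Without amplification, the expected number of proposals in such a classical scheme scales with Cρ; the quantum routine achieves a comparable acceptance probability per attempt and then improves it quadratically via Proposition 4.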
By Corollary 5, conditioned on the sample succeeding, the resulting distribution is δ-close in ℓ1 to the distribution proportional to q ◦ exp(v) ∝ exp(A⊤x), and hence the result is a δ-approximate Gibbs oracle.

Finally, we bound the query cost of the oracle. Define wj := P(uj)² qj and zj := exp(vj) qj as in Corollary 5. By definition of v and Z̃,

∥z∥1 = Σ_{j∈[n]} exp([A⊤x]j) / Z̃ ∈ [C^{−1}, 1].

Moreover, the last conclusion in Corollary 5 shows ∥w∥1 ≥ (1/72)∥z∥1 ≥ (72C)^{−1}. Hence,

Σ_{j∈[n]} qj P(uj)²/ρ = ∥w∥1/ρ ≥ 1/(72Cρ).

In other words, we have an oracle which we can apply in time O(∆ · log mn) which correctly returns a sample with probability α ≥ 1/(72Cρ). By applying Proposition 4 to improve the success probability, we obtain the desired conclusion at a O(√(Cρ) log(1/δ)) overhead.

Corollary 2. Following notation of Proposition 2, let R := Z̃/Z. There is a quantum oracle Otest which can be implemented under T Update calls to x in O(T log m) time, and has query cost O(√(ρC) · β log⁴(Cmn/(ℓδ))). Furthermore, for explicitly known constants Cℓ and Cu, Otest returns "success" with probability p for Cℓ/√(Rρ) ≤ p ≤ Cu/√(Rρ).

Proof. Our oracle Otest is the oracle from Proposition 2, except we will choose a sufficiently small constant value of δ. It returns "success" when the sample is accepted by the rejection sampler after boosting by amplitude amplification. Before boosting, the success probability from Proposition 2 is Θ(1/(Rρ)), where the constants in the upper and lower bounds are explicit. Further, the constants from Proposition 4 are explicit, and hence boosting by amplitude amplification improves the success probability to Θ(1/√(Rρ)) with known constant bounds, as required by the corollary statement.

C Bounded approximation to exp on [−1, 1]

Here, we give a proof of a lemma (with slightly different constants) used in the prior work [vAG19]. This section builds entirely off prior results on polynomial approximation in [GSLW19]; we include it for completeness because a proof was not given in [vAG19]. As a reminder, we stated and used the following result earlier when constructing our rejection sampler in Appendix B.

Lemma 8 (Lemma 7, [vAG19]). Let β ≥ 1, ξ ≤ 1/10. There is a polynomial Pβ,ξ of degree O(β log(1/ξ)) such that max_{x∈[−1,1]} |Pβ,ξ(x)| ≤ 3 and max_{x∈[−1,0]} |Pβ,ξ(x) − exp(βx)| ≤ ξ.

To obtain the lemma, we will utilize the following result from [GSLW19].

Proposition 7 (Corollary 66, [GSLW19]). Let x0 ∈ [−1, 1], r ∈ (0, 2], δ ∈ (0, r]. Let f : [x0 − r − δ, x0 + r + δ] → C be such that f(x0 + x) = Σ_{ℓ≥0} aℓ xℓ for all x ∈ [−r − δ, r + δ]. Suppose B > 0 is such that Σ_{ℓ≥0} (r + δ)ℓ |aℓ| ≤ B and let ǫ ∈ (0, 1/(2B)]. There is a polynomial P (see Appendix D for its numerically stable implementation) of degree O((1/δ) log(B/ǫ)) such that

max_{x∈[x0−r, x0+r]} |f(x) − P(x)| ≤ ǫ and max_{x∈[−1,1]} |P(x)| ≤ ǫ + B.
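As an aside, the shape of Lemma 8's guarantee is easy to confirm numerically: once the degree passes roughly β, a polynomial approximation of exp(βx) on [−1, 0] becomes very accurate. The snippet below is a rough sanity check only; it uses a plain Chebyshev interpolant (which does not enforce the boundedness on all of [−1, 1] provided by the construction in the proof below).

import numpy as np
from numpy.polynomial.chebyshev import Chebyshev

def exp_approx_error(beta, degree):
    # Chebyshev interpolant of exp(beta*x) on [-1, 0]; an illustrative stand-in
    # for the bounded polynomial P_{beta,xi} of Lemma 8.
    poly = Chebyshev.interpolate(lambda x: np.exp(beta * x), degree, domain=[-1.0, 0.0])
    xs = np.linspace(-1.0, 0.0, 2001)
    return np.max(np.abs(poly(xs) - np.exp(beta * xs)))

# The error drops rapidly once the degree exceeds roughly beta, consistent with
# the O(beta * log(1/xi)) degree bound.
for d in (5, 10, 20, 40):
    print(d, exp_approx_error(beta=10.0, degree=d))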
Proof of Lemma 8. We apply Proposition 7 with f(x) := exp(βx), which has a convergent Taylor series everywhere, and the parameter settings x0 = −1, r = 1, δ = 1/β, B = e. We have that

f(x0 + x) = Σ_{ℓ≥0} exp(−β) βℓ xℓ/ℓ! = Σ_{ℓ≥0} aℓ xℓ, with aℓ = exp(−β) βℓ/ℓ! for any integer ℓ ≥ 0.

We also check that our choice of B is valid, via

Σ_{ℓ≥0} (r + δ)ℓ |aℓ| = exp(−β) Σ_{ℓ≥0} (1 + 1/β)ℓ βℓ/ℓ! = exp(−β) Σ_{ℓ≥0} (β + 1)ℓ/ℓ! = exp(β + 1 − β) = e.

Hence by Proposition 7, for any ξ ≤ 1/(2e) there is a polynomial P of degree O(β log(1/ξ)) such that max_{x∈[−2,0]} |exp(βx) − P(x)| ≤ ξ and max_{x∈[−1,1]} |P(x)| ≤ e + ξ ≤ 3.

D Numerically stable implementation of polynomial approximation

Throughout this section, let ∆ = O((1/ǫ) log²(mn/ǫ)) be the degree of the polynomial used in the proof of Proposition 2 in Appendix B (specifically, constructed in the proof of Proposition 2, where we have C = O(1) and δ = O(ǫ) in our applications). The polynomial we use is constructed via a decomposition in the Fourier basis (see Lemmas 57 and 65, [GSLW19]). It is not immediate that this polynomial transform can be implemented stably in finite-precision arithmetic within the quantum singular value transformation framework of [GSLW19], which is used in the proof of Proposition 2. However, [Haa19] shows that given such a decomposition in the Fourier basis, we can obtain a numerically stable implementation of the required polynomial transformation as a quantum circuit, up to additive error ξ, in time O(∆³ log(∆/ξ)). In our setting (in the proof of Proposition 2), it is straightforward to check that ξ = poly(m, n, ǫ−1).
This construction results in the additive term in Theorem 4.
diff --git a/9tFJT4oBgHgl3EQfoyxM/content/2301.11597v1.pdf b/9tFJT4oBgHgl3EQfoyxM/content/2301.11597v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..242ed155ea766f65ec98472c82fb3a61f5dbb7e5
--- /dev/null
+++ b/9tFJT4oBgHgl3EQfoyxM/content/2301.11597v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79e625ab7976c129b39eeb312becb30ffc16cfba061f383270ca932118dd4de5
+size 2380508
diff --git a/9tFJT4oBgHgl3EQfoyxM/vector_store/index.faiss b/9tFJT4oBgHgl3EQfoyxM/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..9516d065020e95c60f811a1f84565aab37ca02fd
--- /dev/null
+++ b/9tFJT4oBgHgl3EQfoyxM/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b61ca1da7b6dec3b13aa0182ce54f1b0c4e26de338201553d91bb175319cc977
+size 2883629
diff --git a/9tFJT4oBgHgl3EQfoyxM/vector_store/index.pkl b/9tFJT4oBgHgl3EQfoyxM/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..239fbe764cc4f4b02b02e655a9836eba1a953064
--- /dev/null
+++ b/9tFJT4oBgHgl3EQfoyxM/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53ddd3b99cd93382787d6b21f9b26b7d12aa05a675b89a31ed741f248b809bab
+size 103487
diff --git a/AtAzT4oBgHgl3EQfhv1C/content/tmp_files/2301.01488v1.pdf.txt b/AtAzT4oBgHgl3EQfhv1C/content/tmp_files/2301.01488v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8f29eb4f4b8898ad1e187ce1dfec819786beefd7
--- /dev/null
+++ b/AtAzT4oBgHgl3EQfhv1C/content/tmp_files/2301.01488v1.pdf.txt
@@ -0,0 +1,1850 @@
Informed Down-Sampled Lexicase Selection: Identifying productive training cases for efficient problem solving

Ryan Boldi* (rbahlousbold@umass.edu), University of Massachusetts, Amherst, MA 01003, USA
Martin Briesch* (briesch@uni-mainz.de), Johannes Gutenberg University, Mainz, 55128, Germany
Dominik Sobania (dsobania@uni-mainz.de), Johannes Gutenberg University, Mainz, 55128, Germany
Alexander Lalejini (lalejina@gvsu.edu), Grand Valley State University, Allendale, MI 49401, USA
Thomas Helmuth (thelmuth@hamilton.edu), Hamilton College, Clinton, NY 13323, USA
Franz Rothlauf (rothlauf@uni-mainz.de), Johannes Gutenberg University, Mainz, 55128, Germany
Charles Ofria (ofria@msu.edu), Michigan State University, East Lansing, MI 48824, USA
Lee Spector (lspector@amherst.edu), Amherst College, Amherst, MA 01002, USA

Abstract

Genetic Programming (GP) often uses large training sets and requires all individuals to be evaluated on all training cases during selection. Random down-sampled lexicase selection evaluates individuals on only a random subset of the training cases, allowing for more individuals to be explored with the same amount of program executions. However, creating a down-sample randomly might exclude important cases from the current down-sample for a number of generations, while cases that measure the same behavior (synonymous cases) may be overused despite their redundancy.
In this work, we introduce Informed Down-Sampled Lexicase Selection. This method leverages population statistics to build down-samples that contain more distinct and therefore informative training cases. Through an empirical investigation across two different GP systems (PushGP and Grammar-Guided GP), we find that informed down-sampling significantly outperforms random down-sampling on a set of contemporary program synthesis benchmark problems. Through an analysis of the created down-samples, we find that important training cases are included in the down-sample consistently across independent evolutionary runs and systems. We hypothesize that this improvement can be attributed to the ability of Informed Down-Sampled Lexicase Selection to maintain more specialist individuals over the course of evolution, while also benefiting from reduced per-evaluation costs.

Keywords: Genetic programming, parent selection algorithms, selection schemes, lexicase selection, down-sampling, informed down-sampling

* Both authors contributed equally.
©2022 by the Massachusetts Institute of Technology
arXiv:2301.01488v1 [cs.NE] 4 Jan 2023

1 Introduction

In Evolutionary Computation, we often use large sets of training data to evaluate the quality of candidate solutions. For instance, most Genetic Programming (GP) systems evaluate programs using input/output examples (training cases) that specify the expected behavior of a correct program. Many GP selection strategies aggregate each program's performance across all training cases to produce one fitness score that can be used for selection. In contrast, lexicase selection (Spector, 2012; Helmuth et al., 2015) avoids aggregation and considers each training case separately, which has been shown to improve diversity maintenance (Helmuth et al., 2016; Dolson and Ofria, 2018) and problem-solving success across a wide range of domains (Moore and Stanton, 2017; Metevier et al., 2019; Aenugu and Spector, 2019; Ding and Spector, 2021; Lalejini et al., 2022).

However, standard lexicase selection has the drawback that we have to evaluate all individuals on all training cases, which can be computationally expensive when evaluation is non-trivial. To reduce lexicase selection's computational cost, recent work introduced down-sampled lexicase selection (Moore and Stanton, 2017; Hernandez et al., 2019; Ferguson et al., 2020). In down-sampled lexicase selection, the training set is randomly down-sampled, reducing the number of test case evaluations required to assess the quality of each candidate solution. This in turn reduces the cost of evaluating an entire set of individuals, allowing us to reallocate computational resources to other aspects of an evolutionary search (e.g., increasing search time or population size), which can substantially improve problem-solving success (Helmuth and Spector, 2020, 2021; Hernandez et al., 2019). However, a naive random down-sample can leave out potentially important test cases, resulting in a loss of diversity (Ferguson et al., 2020; Helmuth et al., 2020; Hernandez et al., 2022).

In order to put more computational effort towards evaluating individuals on important training cases, we propose informed down-sampling (IDS), which uses runtime population statistics to build a down-sample that contains more distinct cases.
+Given a set of solutions, two training cases are distinct from each other if the sub- +sets of solutions that solve each of the two test cases have little-to-no overlap. Two +training cases are synonymous if the opposite is true: there is substantial overlap be- +tween the subsets of solutions that solve each case*. Consequently, Informed down- +sampling favors the distinct training cases over synonymous cases when building a +down-sample to use for selection. We expect these informed down-samples to better +maintain unique individuals, increasing overall population diversity while also putting +more selection pressure on individuals whose descendants are more likely to solve the +problem. These unique individuals are often viewed as the stepping-stones for evolu- +tion to use in finding a perfect solution program (Helmuth et al., 2020). +To assess the performance of Informed Down-Sampled Lexicase Selection, we +compare lexicase selection without down-sampling (standard lexicase), with random +down-sampling, and with informed down-sampling across eight problems from the +first and second program synthesis benchmark suites (Helmuth and Spector, 2015; Hel- +muth and Kelly, 2021). We conduct our experiments in two independent GP frame- +works, Grammar-Guided Genetic Programming (G3P) (Whigham et al., 1995; Forsten- +lechner et al., 2016, 2017) and PushGP (Spector and Robinson, 2002; Spector et al., 2004). +We find that building a down-sample based on information we collect from the +*Synonymous cases can also be thought of as cases that have different inputs and outputs yet measure +a very similar functionality such that there is a high correlation between individuals’ performance on these +cases. +2 +Preprint + +Informed Down-Sampled Lexicase Selection +population is a valuable way to improve the success rates of evolutionary runs at a +fixed computational cost. Furthermore, simply tracking which cases are distinct, and +ensuring they are placed in a down-sample, can significantly improve problem solving +performance. Our results provide evidence that informed down-sampling improves +the success rate of search in the two GP systems used. By analyzing the composition +of down-samples, we also verify that informed down-sampling builds down-samples +that contain more informative test cases (i.e. edge cases) than random down-sampling. +2 +Related Work +In most GP applications, parent selection uses the performance of candidate solutions +on a set of training cases to pick individuals that contribute genetic material to the next +generation. Most selection algorithms aggregate the scores on these training cases to get +a single score per candidate and then select the most fit candidates using tournament +selection (Brindle, 1980), implicit fitness sharing (Smith et al., 1993), fitness proportion- +ate selection (Holland, 1992), or another selection strategy. The fitness aggregation pro- +cedure for these methods often results in a loss of semantic information about which +training cases the individual performs well on (Krawiec et al., 2016), motivating the +development of selection strategies that consider each individual’s performance on all +training cases encountered (Vanneschi et al., 2014; Goings et al., 2012; Deb et al., 2002; +Horn et al., 1994). +In contrast, lexicase selection does not aggregate fitness or performance measures +(Spector, 2012). 
For each parent selection event, the lexicase selection procedure first +places all individuals in the population into a “parent pool” (i.e., the pool of individ- +uals eligible to be selected). To select a parent, lexicase selection shuffles the training +cases into a random ordering, and each training case is considered in sequence. For +each training case, the parent pool is filtered down to just the individuals that have the +best (or tie for the best) performance, removing all but the best candidates from further +consideration. If there is only one individual that remains in the pool during this filter- +ing process, this individual is selected. If the training cases are exhausted and there are +still individuals in the pool, one of these individuals is selected at random. +Meanwhile, many variants of lexicase selection have been proposed for use in dif- +ferent problems or domains. For example, epsilon lexicase selection (La Cava et al., +2016; Moore and Stanton, 2017), batch lexicase selection (Aenugu and Spector, 2019; +Sobania and Rothlauf, 2022), gradient lexicase selection (Ding and Spector, 2021), lexi- +case selection for GAs (Metevier et al., 2019), weighted shuffle lexicase selection (Troise +and Helmuth, 2017), and fast lexicase selection (Ding et al., 2022). +One of the most promising variants of lexicase selection is down-sampled lexicase +selection, which was first proposed for expensive evolutionary robotics runs by Moore +and Stanton (2017) and later formalized by Hernandez et al. (2019) for GP runs. So far, +down-sampled lexicase selection increased the success and generalization rates for a +variety of problems (Ferguson et al., 2020). Down-sampled lexicase selection works by +randomly sampling once in each generation the training set to create a smaller set of +cases. These cases are then used to perform all selection events in the population for +that one generation. This limitation on the number of test cases reduces the computa- +tional costs of evaluating the individuals, which is usually one of the most expensive +operations in evolutionary runs. These savings could be used to perform computation- +ally cheaper GP runs, increase the population size, or run evolution for more genera- +tions. +Down-sampled lexicase selection has also been found to significantly outperform +Preprint +3 + +R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. Helmuth, F. Rothlauf, C. Ofria and L. Spector +regular lexicase selection in a variety of program synthesis benchmarks (Hernandez +et al., 2019; Ferguson et al., 2020; Helmuth and Spector, 2020, 2021; Helmuth and Ab- +delhady, 2020). However, creating a down-sample randomly can exclude important +training cases from the current down-sample for a number of generations (Hernandez +et al., 2022), while synonymous cases may be overused. As a first attempt at chang- +ing the composition of cases in the down-sample, Boldi et al. (2022) explored using +a rolling down-sample and a disjoint down-sample for lexicase selection runs. While +the results were neutral-if-not-negative, they highlighted the presence of synonymous +cases in practice and suggest that an attempt at mediating the time put into evaluating +individuals on these synonymous cases might improve search performance. +Work in the EC literature that is related to informed down-sampling primarily +includes the co-evolution of fitness predictors and maximizers (Schmidt and Lipson, +2005, 2008; ˇSikulov´a and Sekanina, 2012). 
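As a concrete reference point, the selection loop described above, together with the random down-sampling it is combined with, can be sketched as follows. This is a minimal illustration assuming pass/fail errors; it is not the implementation used in either GP system, and all names are ours.

```python
import random

def lexicase_select(pop_passes, rng=random):
    """Select one parent with lexicase selection.

    pop_passes: list of per-individual pass/fail vectors, where
    pop_passes[i][j] is True if individual i solves training case j.
    Returns the index of the selected individual.
    """
    n_cases = len(pop_passes[0])
    pool = list(range(len(pop_passes)))      # every individual starts in the parent pool
    case_order = list(range(n_cases))
    rng.shuffle(case_order)                  # new random case ordering per selection event
    for case in case_order:
        best = max(pop_passes[i][case] for i in pool)
        # keep only the individuals that are (tied for) best on this case
        pool = [i for i in pool if pop_passes[i][case] == best]
        if len(pool) == 1:
            return pool[0]
    return rng.choice(pool)                  # cases exhausted: pick among the remaining at random

def random_down_sample(n_cases, rate, rng=random):
    """One random down-sample of case indices, redrawn once per generation."""
    k = max(1, int(rate * n_cases))
    return rng.sample(range(n_cases), k)
```

Random down-sampled lexicase selection then simply draws one `random_down_sample` per generation and restricts every selection event in that generation to the sampled case indices.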
That work attempts to evolve a smaller set +of training cases, or fitness predictors, to evaluate the fitness of individuals instead of +using the entire training set. While our studied methods do not involve co-evolution, +they both result in a compressed training set that is roughly as informative as the set +of all available data. Another example is the use of random down-sampling to im- +prove performance of AutoML runs that use Genetic Programming (Zogaj et al., 2021). +In the broader machine learning community, random down-sampling is used to gen- +erate mini-batches for stochastic gradient descent (Ruder, 2017), and forms of non- +random down-sampling are used to detect hard or informative parts of the training +data (Loshchilov and Hutter, 2015; Bachem et al., 2017; Paul et al., 2021; Chrysakis and +Moens, 2020). +3 +Informed Down-Sampling +Informed down-sampling addresses randomly down-sampled lexicase’s drawback of +sometimes including many synonymous training cases in a down-sample, which is +computationally inefficient and can result in a failure to accurately assess candidate so- +lution quality. For example, down-sampled lexicase selection might fail to select candi- +date solutions that specialize on training cases absent from a particular random down- +sample, resulting in the loss of potentially important genetic material from the popu- +lation. Instead of down-sampling randomly, informed down-sampling creates down- +samples composed of more distinct training cases than a random sample would contain +using runtime population statistics. As a result, we expect informed down-sampling +lexicase selection to maintain more diverse populations, while reducing computation +spent on evaluating individuals on synonymous training cases. +We suggest two methods of building an informed down-sample. First, we explore +the idealized effectiveness of informed down-sampling by presenting it with full infor- +mation. This method requires evaluating the entire population on all training cases, +performing the same number of program executions per generation as normal lexicase +selection. Therefore, informed down-sampling with full information cannot capital- +ize on the computational savings afforded by random down-sampling. However, the +full information approach provides useful intuition for building an informed down- +sample, allowing us to measure the problem-solving success of our sampling approach +under idealized conditions. +Next, we present an approach for creating an informed down-sample that reduces +the number of per-generation evaluations required for selection (relative to standard +lexicase selection). This second approach, referred to as the “sparse information” ap- +proach, estimates the distinctness of training cases based on a sample of individuals +4 +Preprint + +Informed Down-Sampled Lexicase Selection +I1 +I2 +I3 +I4 +I5 +I6 +� +����� +� +����� +S1 +0 +1 +0 +1 +1 +0 +S2 +1 +1 +0 +0 +1 +1 +S3 +1 +0 +1 +1 +0 +1 +S4 +0 +1 +0 +0 +1 +1 +S5 +0 +1 +0 +1 +1 +0 +Figure 1: Example of the data structure that is used to determine distances between +cases. c1,...,5 are cases, with their respective solve vectors S1,...,5, and I1,...,6 are indi- +viduals. The entry at Sj and Ii represents whether the ith individual solved the jth test +case or not. The binary solve vectors Sj can be read off as the respective row for the +jth case. The distance between two cases, D(cx, cy), is the Hamming distance between +their respective solve vectors. For example, D(c1, c2) = 3 and D(c2, c3) = 4. 
from the parent population. Indeed, building an informed down-sample using sparse information results in nearly the same per-generation evaluation savings as when using random down-sampling.

3.1 Building an Informed Down-Sample with Full Information

In our informed down-sampling approach with full information, we create one down-sample of training cases per generation, and we use candidate solution performances on only the sampled training cases to choose parents with lexicase selection. To construct an informed down-sample with full information, we evaluate all members of the population on all training cases. In this work, each of these evaluations is on a pass/fail basis. Next, we construct the "solve vector" S_j for each training case c_j, which is a vector of binary values that specifies which individuals in the population have solved the training case. We then calculate the Hamming distance between solve vectors for all pairs of training cases, allowing us to measure how distinct training cases are relative to one another.

We begin constructing the down-sample by randomly selecting an initial training case to include. Then we find the training case whose solve vector is maximally distant from the closest training case already included in the down-sample, and add it to the down-sample. We repeatedly add training cases to the down-sample in this way until reaching a parameterized sample size.

Figure 1 provides an example set of binary solve vectors for a set of five training cases and a population of six individuals. The columns of this matrix, I_i, describe the performance of the ith individual on all cases. A value of 1 at (I_i, c_j) implies that the ith individual solved the jth test case (error = 0), or S_j^i = 1. Since all members of a population of size p are evaluated on all test cases (at least initially), we can say that \|S_j\| = p for all cases c_j. Thus, the number of columns corresponds to the population size.

We define the distance between two training cases as D(c_x, c_y) := Hamming(S_x, S_y), where Hamming(·, ·) is the Hamming distance between two vectors. For binary vectors, the distance function is defined as D(c_x, c_y) = \sum_{i=1}^{p} |S_x^i - S_y^i|. Thus, two training cases that are solved by the same set of individuals have D(c_x, c_y) = 0 and are called "synonymous cases". For example, for the cases in Figure 1, c_1 and c_5 have identical solve vectors, and therefore are synonymous (D(c_1, c_5) = 0).

We think of this distance function as indicating the joint information contained in a pair of cases. Two cases that have exactly the same individuals solving them (i.e., are synonymous) have little to no joint information, because having both of the cases in the sample would be about as informative as having just one of them. Two cases that have a high distance from each other, due to being solved by different subsets of the population, have high joint information, as each case is responsible for informing the system about the performance of one set of individuals. Having both of these cases, as opposed to one alone, would be a more faithful approximation of using the full training set.

Once we have a method to evaluate the pairwise distance between two cases, we can use it to select a down-sample of the training set for use in the current generation.
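As a small, self-contained check of these definitions, the solve vectors of Figure 1 can be turned into a pairwise distance matrix with a few lines of NumPy. This is a sketch, and the function name is ours.

```python
import numpy as np

# Solve vectors from Figure 1: rows are cases c1..c5, columns are individuals I1..I6.
S = np.array([
    [0, 1, 0, 1, 1, 0],   # S1
    [1, 1, 0, 0, 1, 1],   # S2
    [1, 0, 1, 1, 0, 1],   # S3
    [0, 1, 0, 0, 1, 1],   # S4
    [0, 1, 0, 1, 1, 0],   # S5 (identical to S1, i.e. synonymous with c1)
])

def case_distance_matrix(solve_vectors):
    """Pairwise Hamming distances D(c_x, c_y) = sum_i |S_x^i - S_y^i|."""
    S = np.asarray(solve_vectors)
    # Broadcasting (n_cases, 1, pop) against (1, n_cases, pop) -> (n_cases, n_cases)
    return np.abs(S[:, None, :] - S[None, :, :]).sum(axis=2)

D = case_distance_matrix(S)
print(D[0, 1], D[1, 2], D[0, 4])   # prints 3 4 0, matching the Figure 1 caption
```

The printed values reproduce the distances given in the Figure 1 caption: D(c_1, c_2) = 3, D(c_2, c_3) = 4, and D(c_1, c_5) = 0.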
+In this work, we apply a variant of Farthest First Traversal to select the down-sample +(Hochbaum and Shmoys, 1985). The creation of the down-sample starts with the selec- +tion of one random case to include. Then, at each step, we scan each unselected test +case and measure it’s minimum distance to any test in the current down-sample. We +select the case that has the largest minimum distance. In other words, we successively +add the test case that is furthest from the current down-sample at its nearest point. +Our Farthest First Traversal algorithm is shown in algorithm 1. Starting with an +empty down-sample, we first add a random case to the down-sample (line 4), and +then iteratively add the cases that are maximally far from the closest case to it (5-9). If +there are multiple cases with the same maximum minimum distance, ties are broken +randomly. The MinDisti value stores the distance from a given case, ci to the closest +case to it in the down-sample. The cases.popMaxMinDistCase() function removes +and returns the case in cases that has the maximum value for MinDisti. Note here +that it is often the case that the minimum distances all go to zero at a point during the +down-sample formation. At this point, every case left over in the training set has a +synonymous case in the down-sample already. When this happens, the farthest first +procedure will automatically select cases at random from the training set to fill up the +required down-sample size. Figure 2 shows an example of performing informed down- +sampling with full information using the case solve vectors from Figure 1. +Algorithm 1 Farthest First Traversal Down-Sample Selection +Data: D(·, ·) : D(ci, cj) = D(cj, ci) = distance from case i to case j, +r = down-sample rate +1: cases ← set of all cases in training set +2: ds ← empty set +▷ the down-sample +3: size ← r × |cases| +▷ desired size of down-sample +4: ds.add(cases.popRandomCase()) +5: while ∥ds∥ < size do +6: +for every case c in cases do +7: +MinDisti ← minimum distance from ci to any case in ds +8: +end for +9: +ds.add(cases.popMaxMinDistCase()) +10: end while +11: return ds +6 +Preprint + +Informed Down-Sampled Lexicase Selection +D = +c1 +c2 +c3 +c4 +c5 +� +����� +� +����� +c1 +0 +3 +4 +2 +0 +c2 +3 +0 +4 +1 +3 +c3 +4 +4 +0 +5 +5 +c4 +2 +1 +5 +0 +2 +c5 +0 +3 +5 +2 +0 +Random +� +�� +� +ds = {c1} +→ +c3 had max. distance to c1 +� +�� +� +ds = {c1, c3} +→ +c2 had max. min. distance to {c1, c3} +� +�� +� +ds = {c1, c3, c2} +Figure 2: Example running procedure of informed down-sampling with full informa- +tion to pick a down-sample of size 3 (or r = +3 +5). We have a tabular representation +of the distance function D generated by computing the Hamming distance between +each pair of cases’ solve vectors. Beginning with a randomly selected case c1, we se- +quentially add the cases that are at the maximum distance to their closest case in the +down-sample. The first step is simply finding the case (c3) in the training set with the +maximum distance to c1. To select the next case, we need to find, for c2, c4 and c5, +which of c1 and c3 is closest to them, respectively, and then which of those cases is far- +thest away. In this example, c2 was added as it had a higher distance (3) to its closest +case than did c4 or c5 (2 and 0, respectively). Notice that the cases that were left out, c4 +and c5, are synonymous or nearly synonymous with cases already in the down-sample: +c2 and c1, respectively. 
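A sketch of this farthest first traversal, operating on a precomputed distance matrix such as the one above, is given below. It illustrates Algorithm 1 rather than our exact implementation; ties between equally distant cases are broken at random, as described.

```python
import random
import numpy as np

def farthest_first_down_sample(D, sample_size, rng=random):
    """Farthest-first traversal over training cases (a sketch of Algorithm 1).

    D: (n_cases, n_cases) pairwise case distance matrix.
    Returns a list of selected case indices of length sample_size.
    """
    n_cases = D.shape[0]
    first = rng.randrange(n_cases)              # start from one randomly chosen case
    ds = [first]
    # min_dist[i] = distance from case i to its closest case already in the down-sample
    min_dist = D[first].astype(float).copy()
    min_dist[first] = -np.inf                   # mark selected cases so they are never re-picked
    while len(ds) < sample_size:
        best = np.max(min_dist)
        candidates = np.flatnonzero(min_dist == best)   # ties broken at random
        nxt = int(rng.choice(list(candidates)))
        ds.append(nxt)
        min_dist = np.minimum(min_dist, D[nxt])
        min_dist[nxt] = -np.inf
    return ds

# Usage with the Figure 1 example: farthest_first_down_sample(case_distance_matrix(S), 3)
```

Note that once every remaining case is synonymous with something already selected, all minimum distances are zero and the remaining slots are filled by a random choice among those zero-distance cases, matching the behavior described above.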
+3.2 +Building an Informed Down-Sample with Sparse Information +Down-sampled lexicase selection’s problem-solving benefits stem from the computa- +tional savings gained by not evaluating the entire population on the whole training set +for every generation. For a fixed computational budget, down-sampling allows more +computational resources to be allocated to other aspects of evolutionary search, such +as running for more generations or increasing population size. As a result, a larger +portion of the search space can be explored (Helmuth and Spector, 2021). Informed +down-sampling with full information requires the evaluation of all individuals on all +training cases in order to construct the down-sample to use in selection. This entire pro- +cess is counter productive, as we could have just used the initial population evaluation +to select individuals and circumvent the entire down-sampling process. The benefit of +down-sampling comes from its ability to use sparse information in the individual selec- +tion process. Since our aim is to improve on random down-sampling, we must reduce +the number of necessary program executions in order to calculate distances between +training cases, so that we can benefit from sparse evaluations in both our individual +selections and our down-sample creation. +We present two methods to decrease the number of evaluations required for the +pairwise distance calculation procedure. The first method, parent sampling, samples a +proportion ρ of the parents to evaluate the distances for every generation. These parent- +samples are evaluated on the entire training set. In our runs with a population size of +1000, if we were to randomly sample 0.01 (or ρ = 0.01) of these parents to become +the parent sample, these 10 parents would be evaluated on all training cases. This +results in case solve vectors of length 10 that are used to calculate the distances between +Preprint +7 + +R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. Helmuth, F. Rothlauf, C. Ofria and L. Spector +cases. Distances between cases are determined purely based on these parent-sample +evaluations. We use the distance matrix generated from these parents to estimate the +joint informativeness. +The second method, scheduled case distance computation, involves recomputing the +distance matrix from the current population every k generations, as opposed to every +generation. This schedule reduces the amount of computation required for the evalua- +tion of case distances even further by not performing it every generation. While such +a schedule does not update the distances between cases as often, we still re-sample the +down-sample based on these distances every generation. Due to the stochastic nature of +the down-sample selection process (specifically the random selection of the first case), +it is likely that the same down-sample will not be used to evaluate the population in +consecutive generations. +In combination, parent sampling and scheduled case distance computation allow +us to select a down-sample using far less information about individuals while losing +only a small amount of information about cases and their similarity. This technique +enables informed down-sampling to explore nearly as many individuals as random +down-sampling does. Putting it all together, the informed down-sampling with sparse +information algorithm is detailed in Algorithm 2. This algorithm walks through a sin- +gle generation’s selection events, returning the parents for the next generation. 
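Putting the sparse-information pieces together, one generation could look roughly like the following sketch. It reuses the helper functions from the earlier sketches (`case_distance_matrix`, `farthest_first_down_sample`, `lexicase_select`), `evaluate` stands in for the problem-specific pass/fail check, and none of the names are taken from our actual implementations. The formal version follows in Algorithm 2.

```python
import random

def ids_generation(population, cases, D, gen, evaluate, rho=0.01, r=0.05, k=10, rng=random):
    """One generation of informed down-sampling with sparse information (a sketch).

    population: list of candidate programs.
    cases: the full training set.
    D: case distance matrix carried over from earlier generations.
    evaluate(individual, case) -> bool is the problem-specific pass/fail check.
    Returns the selected parents and the (possibly updated) distance matrix.
    """
    # Every k generations: evaluate a small parent sample on *all* cases and
    # recompute the pairwise case distances from their solve vectors.
    if gen % k == 0:
        parent_sample = rng.sample(population, max(1, int(rho * len(population))))
        solve_vectors = [[int(evaluate(ind, c)) for ind in parent_sample] for c in cases]
        D = case_distance_matrix(solve_vectors)          # from the earlier sketch

    # Re-draw the informed down-sample every generation from the current distances.
    ds_idx = farthest_first_down_sample(D, max(1, int(r * len(cases))), rng)

    # Evaluate the whole population only on the down-sampled cases, then select |P| parents.
    pop_passes = [[evaluate(ind, cases[j]) for j in ds_idx] for ind in population]
    parents = [population[lexicase_select(pop_passes, rng)] for _ in population]
    return parents, D
```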
+Algorithm 2 Informed Down-Sampling with Sparse Information +Data: +P : population, +cases: set of all training cases, +k : scheduled case distance computation parameter, +ρ : parent sampling rate, +G : current generation counter, +D : case distance matrix. +▷ all distances initialized to be maximally far +Result: A list of selected parents +1: if G%k == 0 then +2: +ˆP ← sample ρ×|P| parents from P +3: +evaluate ˆP on cases +4: +calculate D from case solve vectors from solutions in ˆP on cases +5: end if +6: D(·, ·) ← distance function derived from indexing into D +7: ds ← create downsample using farthest first traversal down-sampling (See Algo 1) +8: P ← select |P| new parents using lexicase selection from P using ds as cases +9: return P +4 +Experimental Methods +We conducted a series of experiments to study the performance of informed down- +sampled lexicase selection. We compared the performance of informed down-sampled, +random down-sampled, and standard lexicase selection on a series of program synthe- +sis benchmark problems. We performed all experiments in two independent genetic +programming systems to show that the findings are robust across different program +representations: PushGP and Grammar Guided Genetic Programming (G3P). +This section introduces the benchmark problems and genetic programming sys- +tems used in our experiments and describes our experimental design. +8 +Preprint + +Informed Down-Sampled Lexicase Selection +Table 1: Program synthesis benchmark problems selected from the first and second gen- +eral program synthesis benchmark suite, along with their respective input and output +types and multiplicities. +Problem +Suite +Input Type +Output Type +Count Odds +PSB1 +Vector of Integer +Integer +Find Pair +PSB2 +Vector of Integer +Two Integers +Fizz Buzz +PSB2 +Integer +String +Fuel Cost +PSB2 +Vector of Integer +Integer +GCD +PSB2 +Two Integers +Integer +Grade +PSB1 +Five Integers +String +Scrabble Score +PSB1 +String +Integer +Small or Large +PSB1 +Integer +String +4.1 +Program Synthesis Benchmark Problems +We evaluate each system using eight program synthesis benchmark problems from the +first and second general program synthesis benchmark suites (Helmuth and Spector, +2015; Helmuth and Kelly, 2021). These problems are well-studied and are commonly +used to compare parent selection algorithms in a GP context (Sobania et al., 2022b,a). +These two benchmark suites include a variety of introductory program synthesis prob- +lems that require the manipulation of multiple data types with complex looping or +conditional structures. +Each benchmark problem is defined by a set of input/output examples (referred +to as cases) that specify the desired behavior of a correct program. For each problem, +we split the input/output examples into a training set and a testing set. During evolu- +tion, we assessed program quality using only the training set. We used the testing set +to measure how well a program generalized on examples unseen during evolution. We +consider each input/output example on a pass/fail basis; that is, a program passes a +test case if it produces the correct output when run with the associated input. A pro- +gram is a solution if it passes all of the training cases; it generalizes if it passes all training +and all testing cases. We refer to runs as “success” if they result in the production of +a generalizing solution. 
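These definitions translate directly into the check used to classify a run's outcome at the end of evolution. A minimal sketch follows; the helper names and the `evaluate` callback are ours.

```python
def passes_all(program, cases, evaluate):
    """True if the program produces the expected output on every case."""
    return all(evaluate(program, case) for case in cases)

def run_outcome(program, train_cases, test_cases, evaluate):
    """A program is a solution if it passes all training cases; a run is a
    success if that solution also generalizes to the unseen test set."""
    if not passes_all(program, train_cases, evaluate):
        return "no solution"
    if passes_all(program, test_cases, evaluate):
        return "success"                      # generalizing solution
    return "solution (does not generalize)"
```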
We used the same training and testing data sets across both +PushGP and G3P for each problem to ensure the data available is not biasing perfor- +mance. +Table 1 shows the eight program synthesis benchmark problems that we have cho- +sen, along with their input and output types. We selected these particular problems to +allow us to test informed down-sampling on a set of easy, medium, and hard problems +as established by published success rates using PushGP and random down-sampled +lexicase selection (Helmuth and Spector, 2021; Helmuth and Kelly, 2022). We also en- +sured that these problems require qualitatively different programmatic paradigms to +solve, such as looping and conditional execution (Helmuth and Kelly, 2022). +4.2 +Genetic Programming Systems +PushGP is a system that evolves computer programs in the Push programming lan- +guage, a stack-based language specifically invented for use in genetic programming +(Spector and Robinson, 2002; Spector et al., 2004). Push literals are pushed onto one +of a set of datatype specific stacks while instructions are also stored on a stack dur- +ing interpretation. These instructions usually act on data from the stacks and leave +Preprint +9 + +R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. Helmuth, F. Rothlauf, C. Ofria and L. Spector +Table 2: General and System-Specific Evolution Parameters +General Parameter +Value +runs per problem +100 +population size +1,000 +size of training set +200 +size of test set +1,000 +program execution limit +60 million +maximum number (base) of generations +300 +PushGP Parameter +Value +variation operator +UMAD +UMAD rate +0.1 +G3P Parameter +Value +crossover operator +subtree crossover +crossover probability +0.95 +mutation operator +subtree mutation +mutation steps +1 +maximum tree depth +17 +elite size +5 +initialisation +position-independent grow +maximum initial tree depth +10 +their return value on the stacks. Instructions take values from and return results to +the appropriately typed stack, including from and to the instruction stack, allowing for +programs to use multiple data types and complex conditional execution paradigms. In +this work, we used the propeller implementation of PushGP†. +G3P uses a context-free grammar in Backus-Naur form to evolve individuals in a +desired programming language and supports the use of different data types and con- +trol structures (Whigham et al., 1995; Forstenlechner et al., 2016, 2017). To prevent the +generation of many invalid solutions during search, we use a tree-based representation +instead of the common genotype-phenotype mapping known from classical grammat- +ical evolution (Ryan et al., 1998; Sobania and Rothlauf, 2020). For the implementation +of G3P, our code‡ is based on the PonyGE2 framework (Fenton et al., 2017). +Table 2 shows the system-specific parameters for PushGP and G3P, and the general +parameters that are used in both systems. The “runs per problem” parameter refers to +the number of independent evolutionary runs that were conducted for each problem +and experimental configuration. The PushGP system uses the uniform mutation by ad- +dition and deletion (UMAD) mutation operator (Helmuth et al., 2018). This UMAD op- +erator works with a 0.1 mutation rate. For G3P, we use subtree mutation and crossover, +with a crossover probability of 0.95. The initialization for G3P is position-independent +grow (Fagan et al., 2016). 
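For reference, the shared settings from Table 2 can be collected into a small configuration object. This is only a convenience sketch (the class and field names are ours); it also checks that the base budget of 300 generations of 1,000 individuals on 200 training cases equals the 60 million execution limit.

```python
from dataclasses import dataclass

@dataclass
class RunConfig:
    # Shared parameters from Table 2 (system-specific operators are omitted here).
    runs_per_problem: int = 100
    population_size: int = 1000
    train_cases: int = 200
    test_cases: int = 1000
    execution_limit: int = 60_000_000
    base_generations: int = 300

cfg = RunConfig()
# Sanity check: with the full training set, the execution budget is exhausted after
# population_size * train_cases * base_generations program executions.
assert cfg.population_size * cfg.train_cases * cfg.base_generations == cfg.execution_limit
```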
We use grammars based on those provided by the PonyGE2 +framework with small adjustments to make them better comparable to the PushGP +instructions. +†https://github.com/ryanboldi/propeller/releases/tag/Informed-Downsampling +‡https://gitlab.rlp.net/mbriesc/informed-down-sampled-lexicase-selection +10 +Preprint + +Informed Down-Sampled Lexicase Selection +4.3 +Evaluation and Generation Limits +In order to make a fair comparison between methods that perform different numbers of +program executions per generation, we use the recommendation from the PSB2 bench- +mark suite to limit each GP run to 60 million program executions (Helmuth and Kelly, +2021). Since program executions typically take up the majority of the computational +requirements of a GP run, this ensures runs receive similar amounts of computation re- +gardless of whether they use down-sampling. In standard runs using all training cases, +the 60 million executions are used by at most 300 generations of a population size of +1000 individuals evaluated on 200 cases. With random down-sampling, we increase +the maximum number of generations by the same factor as the down-sampling. For +example, if one tenth of the training data is used in each sample, we can run evolu- +tion for ten times the number of generations while keeping the number of individual +program executions constant. +More generally, if we let G be the maximum number of generations for a run using +all training cases, we allow our random down-sampling runs a limit of ˆG generations +where ˆG is given by +ˆG = G +r , +where r is the down-sample rate. For informed down-sampled lexicase selection the +generational limit is calculated by +ˆG = +G +r + ρ(1−r) +k +, +where ρ is the parent sampling rate and k is the parameter for the scheduled case dis- +tance computation. The exact generational limits for each experimental configuration +are shown in table 3.§ +4.4 +Experimental Configurations +We explore 11 different configurations of lexicase selection for each problem: standard +lexicase selection (Lex), random down-sampled lexicase selection (Rnd), IDS lexicase +selection with full information, as well as three sparse information configurations. To +better match previous literature, all down-sampling methods were performed both +with r ∈ {0.05; 0.1}. +Table 3 shows the configurations of the different runs performed in this work. +These runs, due to different generational computational costs, have different genera- +tional limits as explained in section 4.3. +Full information down-sampling is simply using a parent-sample rate of 1, which +means that the distances between training cases are determined by all parents’ perfor- +mance on every test case. With this, the quality of the distance metric between two +cases is not limited by the parent-sampling or generational gaps we are using to reduce +computational load. Full information down-sampling is included as a control exper- +iment to compare with using all cases for selection in standard lexicase selection. It +is important to note that we run for the same number of generations as with regular +lexicase selection because we need to evaluate all parents on all test-cases in order to +determine the distances between the cases. +§As our implementations evaluate the fitness of individuals in the parent sample twice, we run the IDS +with sparse information runs for slightly (< 40) fewer generations to compensate the additional computa- +tional effort. +Preprint +11 + +R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. 
Helmuth, F. Rothlauf, C. Ofria and L. Spector +Table 3: Different settings conducted in our experiments for standard lexicase selection +(Lex), random down-sampled lexicase selection (Rnd) and informed down-sampled +lexicase selection (IDS). The variable r denotes the down-sampling rate, ρ is the parent +sampling rate, k is generational interval at which we update the distance matrix and ˆG +specifies the maximum number of generations. +Method +Lex +Rnd +IDS +Rnd +IDS +r +- +0.05 +0.05 +0.1 +0.1 +ρ +- +- +1 +0.01 +0.01 +0.01 +- +1 +0.01 +0.01 +0.01 +k +- +- +1 +1 +10 +100 +- +1 +1 +10 +100 +ˆG +300 +6000 +300 +5042 +5888 +5988 +3000 +300 +2752 +2973 +2997 +Finally, the six informed down-sampling methods we have chosen for this work +include, for both the 0.05 and 0.1 down-sample rate (r), 0.01 parent sampling (ρ) rate +with a few different distance calculation scheduling (k) parameters. Through a set of +preliminary experiments, the value of ρ = 0.01 for the parent sampling rate was de- +termined to be effective while not resulting in too many extra program executions¶. +In conjunction, these hyper-parameters mean that every k generations, 10 parents are +used to determine the distances between all training cases, where k ∈ {1, 10, 100}. +5 +Results and Discussion +We discuss the success rates achieved by both GP systems using standard lexicase se- +lection, random down-sampling, and different configurations of IDS. Further, we study +how the composition of the down-samples found by IDS change over the number of +generations. +5.1 +Informed Down-Sampling Improves Problem-solving Success +Tables 4 and 5 show the success rates for PushGP and G3P respectively on the chosen +program synthesis benchmark problems for different parameter configurations. The +success rate is defined as the number of runs that result in a program that passes the +complete training set as well as the entire unseen test set. +For random down-sampling and IDS, we measured solutions on only the down- +samples during the actual run. As such, we execute these runs to the maximum gener- +ational limit, and then conduct a post-hoc analysis to see if any solutions passed all of +the training cases. If so, this is the solution that we then evaluate on the unseen test set +to determine whether it generalizes or not. +For all studied configurations, we report success rates based on 100 runs. For each +benchmark problem, we highlight in bold the best success rate at each of the down- +sample sizes. Problem names in bold are those where an informed down-sampling +run outperformed random at both down-sample rates on that problem. Problem names +that are underlined are those where a random down-sampling run outperformed an +informed down-sampling run at both down-sample rates. Asterisks signify results +that are significantly better than random down-sampling at the same down-sample size. +¶As we are trying to approach the computational savings of random down-sampled lexicase selection, +the smaller the value of ρ, the better. We found that the relatively small value of ρ = 0.01 resulted in sampling +that was good enough to determine the joint case information. +12 +Preprint + +Informed Down-Sampled Lexicase Selection +Standard lexicase selection was not included in our statistical analyses, as IDS is pre- +sented to improve upon random down-sampling at a fixed down-sample size. We per- +formed significance analysis with a two proportion z-test and Bonferroni-Holm correc- +tion. 
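For reference, the test just described can be sketched as follows. We assume the pooled, one-sided form of the two-proportion z-test, since the exact variant is not spelled out here, and the success counts in the example are illustrative values taken from Table 4 (Fizz Buzz: 95 successes for one IDS configuration versus 64 for random down-sampling; Find Pair: 32 versus 27).

```python
from math import sqrt, erfc

def one_sided_p(z):
    """P(Z >= z) for a standard normal variable."""
    return 0.5 * erfc(z / sqrt(2.0))

def two_proportion_z(successes_a, successes_b, n=100):
    """One-sided two-proportion z-test (pooled variance): is A's success rate higher than B's?"""
    p_a, p_b = successes_a / n, successes_b / n
    p_pool = (successes_a + successes_b) / (2 * n)
    se = sqrt(p_pool * (1 - p_pool) * (2 / n))
    if se == 0:
        return 1.0
    return one_sided_p((p_a - p_b) / se)

def holm_correction(p_values, alpha=0.05):
    """Bonferroni-Holm step-down correction; returns which hypotheses are rejected."""
    m = len(p_values)
    order = sorted(range(m), key=lambda i: p_values[i])
    reject = [False] * m
    for rank, i in enumerate(order):
        if p_values[i] <= alpha / (m - rank):
            reject[i] = True
        else:
            break                              # once one comparison fails, all larger p-values fail
    return reject

p_vals = [two_proportion_z(95, 64), two_proportion_z(32, 27)]
print(holm_correction(p_vals, alpha=0.05))     # [True, False]: only the first difference is significant
```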
Shown with * are those significant at the α = 0.1 level, ** the α = 0.05 level, and +*** the α = 0.01 level. +For the PushGP results, let us consider the Fizz Buzz problem. Standard lexicase +selection had 13 successful runs. Using random down-sampling at the 0.05 down- +sampling rate improved this result to 64, in line with the findings of Helmuth and +Spector (2021). Using the same down-sampling rate with IDS, a 0.01 parent rate, and +k = 100 yielded 95 successful runs. This is significantly better than random down- +sampling at the 0.01 level. This is an important result as IDS is significantly improving +on random down-sampling, which in turn improves on lexicase selection. Another set +of PushGP IDS runs where we observed significant improvements were those of the +Count Odds problem. While standard lexicase selection achieves 24 successes, random +down-sampling at either down-sample rate (r = 0.05 or r = 0.1) does not produce +more than 26 successful runs. The failure to meaningfully improve success rates by +random down-sampling seemed to be addressed by informed down-sampling. This +is clear as informed down-sampling at all configurations ensures that close to if-not- +all 100 runs successfully generalize to the held out test set. This and similar results +hint that while randomly down-sampled lexicase selection works well usually, there +are some problems where important cases might be being dropped out, resulting in a +similar performance to standard lexicase selection despite the increased search gener- +ations. Informed down-sampling has the ability to improve success rates both when +random down-sampling improves upon standard lexicase selection, and when it does +not. +Only one configuration of G3P resulted in a significant improvement on random +down-sampling at the same down-sample rate. For the Grade problem at the 0.05 +down-sample rate, we see significantly more successes when using IDS with ρ = 0.01 +and k = 10. For this problem, using this informed down-sample configuration re- +sulted in 57% of the runs yielding a generalizing solution, where, using random down- +sampling resulted in only 39% of the runs yielding a success. The fact that only a single +configuration of IDS resulted in a significant improvement suggests that the problem- +solving benefits of using IDS are representation- and problem-dependent, motivating +future work to continue improving IDS to achieve more universal improvements to +problem-solving success. +We have a number of hypotheses explaining this improved performance. The first +of these is that the informed down-sampling procedure increases the number of spe- +cialists (individuals exceptional on a few cases, but have a high total error) that survive +over the course of evolutionary time. These individuals could be better maintained +with IDS as the cases they are exceptional on are still placed in the down-samples +throughout evolution, preventing them from being lost as could happen when ran- +domly down-sampling. +Another hypothesis for IDS’s improved performance is that it reduces the compu- +tation used to evaluate individuals on synonymous cases. When two cases are fully +synonymous, all individuals that solve one case solve the other as well. When using +lexicase selection, having both of these cases in the down-sample would result in little +difference in the probability of selecting each individual compared to having only one +case in the down-sample. 
After one of the two cases has been used to filter the pool +of candidate solutions, the other will have no filtering pressure because all remaining +Preprint +13 + +R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. Helmuth, F. Rothlauf, C. Ofria and L. Spector +Table 4: Number of generalizing solutions (successes) out of 100 runs achieved by PushGP on the test set. +Method +Lex +Rnd +IDS +Rnd +IDS +r +- +0.05 +0.1 +ρ +- +- +1 +0.01 +0.01 +0.01 +- +1 +0.01 +0.01 +0.01 +k +- +- +1 +1 +10 +100 +- +1 +1 +10 +100 +Count Odds +24 +25 +43*** +99*** +100*** +98*** +26 +55*** +95*** +99*** +97*** +Find Pair +5 +27 +9 +32 +32 +36 +15 +7 +19 +19 +21 +Fizz Buzz +13 +64 +2 +85*** +94*** +95*** +45 +3 +75 +78* +81** +Fuel Cost +41 +72 +1 +83 +85 +83 +76 +7 +69 +72 +70 +GCD +20 +74 +4 +76 +67 +69 +54 +6 +56 +63 +62 +Grade +0 +0 +0 +0 +1 +0 +1 +0 +0 +1 +1 +Scrabble Score +8 +8 +6 +69*** +64*** +75*** +16 +9 +55*** +74*** +64*** +Small or Large +34 +93 +37 +69 +69 +69 +69 +39 +60 +66 +54 +14 +Preprint + +Informed Down-Sampled Lexicase Selection +Table 5: Number of generalizing solutions (successes) out of 100 runs achieved by G3P on the test set. +Method +Lex +Rnd +IDS +Rnd +IDS +r +- +0.05 +0.1 +ρ +- +- +1 +0.01 +0.01 +0.01 +- +1 +0.01 +0.01 +0.01 +k +- +- +1 +1 +10 +100 +- +1 +1 +10 +100 +Count Odds +65 +66 +45 +53 +62 +63 +67 +58 +60 +58 +72 +Find Pair +0 +0 +0 +1 +0 +0 +1 +0 +0 +1 +0 +Fizz Buzz +62 +83 +50 +84 +78 +85 +78 +53 +81 +89 +72 +Fuel Cost +33 +34 +17 +28 +27 +29 +29 +21 +21 +25 +33 +GCD +0 +1 +0 +0 +0 +1 +0 +0 +0 +0 +0 +Grade +36 +39 +29 +51 +57* +44 +44 +37 +46 +51 +48 +Scrabble Score +6 +10 +1 +11 +10 +10 +14 +0 +6 +3 +3 +Small or Large +41 +52 +49 +54 +63 +63 +59 +52 +57 +55 +63 +Preprint +15 + +R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. Helmuth, F. Rothlauf, C. Ofria and L. Spector +individuals perform identically on the synonymous cases. Having a synonymous case +does increase the chance that one of the two cases appears earlier in the shuffled case +ordering, producing a minor (though perhaps undesired) change in selection proba- +bility. Synonymous (or near synonymous) cases additionally take spots in the down- +sample that cannot be allocated to other, more-informative cases. When using IDS, we +ensure that the first few cases added to the down-sample measure relatively different +behaviors. This may allow IDS to select a larger variety of individuals than random +down-sampling, instead approximating the variety that could be selected by full lexi- +case selection. +These results, in general, make it clear that informed down-sampling by farthest +first traversal is significantly outperforming randomly down-sampled lexicase selec- +tion on a portion of these program synthesis benchmark problems for the PushGP +evolutionary framework. The G3P results are less clearly in favor of informed down- +sampling, but still point to minor improvements in success rates. It is important to +note that all of our down-sampled runs (besides full-information) consistently and sig- +nificantly outperform standard lexicase selection, which has in turn been shown to +significantly outperform other selection strategies. This result agrees with that of Hel- +muth and Abdelhady (2020), showing down-sampled lexicase selection being, before +this work, the state of the art in program synthesis with genetic programming. 
Our in- +formed down-sampling runs outperform random down-sampling (higher success rate +for both down-sample rates) on 6/8 of the problems we studied for PushGP, with 3/8 +of them being statistically significant. For G3P, informed down-sampling improves on +3/8 problems, with 1/8 being significant. +Random down-sampling outperformed informed down-sampling (across both +down-sampling levels) on only one problem (Small or Large) for PushGP, and none for +G3P. For Small or Large with PushGP, we see that the worse performance with informed +down-sampling can be attributed to a lower generalization rate (and not worse perfor- +mance on the training sets). The generalization rates can be found in Appendix Figure 6 +for PushGP and Appendix Figure 7 for G3P. Future work should explore the effect that +informed down-sampling has on generalization in more depth. +5.2 +Using Smaller Informed Down-Samples Tends to Improve Success Rates +In general, our IDS runs at a 0.05 down-sample rate have a higher success rate than +their equivalent counterparts at the 0.1 down-sample rate. This difference is likely due +to the fact that the runs at a 0.1 down-sample rate have a substantially lower genera- +tional limit, meaning that we are exploring a smaller portion of the space of possible +solution programs. With 200 training cases, our down-sample contains 10 and 20 cases +respectively for the 0.05 and 0.1 down-sample rates. A possible reason for the improved +performance at 0.05 is that a larger proportion of these cases are indeed our distinct, or +informative, cases. Note that once the Farthest First Traversal process selects a rep- +resentative case for every synonymous group in the down-sample, every remaining +solution’s minimum distances to the current sample will be equal to 0, so the selections +are performed randomly to fill the rest of the cases. Since we are using the same prob- +lems, with the same number of behavioral niches, we will see the runs with 20 cases in +the down-sample having more synonymous cases in the down-sample. Due to the fact +that the content of the training cases is not notably more informative to make up for the +decreased generational limit, we see a lower success rate. We will analyze the specific +cases that compose the down-samples in section 5.3. +The exceptions to this trend are the full information down-sampling runs. For +16 +Preprint + +Informed Down-Sampled Lexicase Selection +these runs, the larger down-samples tend to perform better. This result is likely due +to the fact that the generational limit was set to 300 for both sampling levels (as they +both evaluate all individuals on all test cases), and so having a smaller down-sample +size would not change the number of evaluations. With more cases in the sample, the +GP method can take into account more information when performing selection, which +could result in more informed search. The magnitude of the differences for success rate +across sample size for the full IDS runs suggests that there are diminishing returns for +including more cases in the sample. +5.3 +Informed Down-Sampling Automatically Discovers Important Training Cases +To gain a deeper insight into how IDS composes down-samples, we visualize how the +selected training cases (used for a down-sample) develop over the generations of an +evolutionary run. +Figures 3 and 4 show the composition of down-samples for every problem at every +generation using PushGP (Fig. 3) and G3P (Fig. 4) with down-sample rate r = 0.05. 
We present results for a full information configuration (ρ = 1 and k = 1) as well as a sparse information configuration (ρ = 0.01 and k = 10). We chose to analyze both a full information and a sparse information run in order to see whether our sparse information configurations find the same training cases to be informative as if we had used all parents to evaluate the distances between training cases.

The plots show how often certain training cases are included in the down-sample at every generation, averaged over all active runs. Each row represents a case in the training data, ordered by its position in the training set. The training sets were generated by first adding some human-expert defined edge cases, and filling the rest with cases that were randomly generated by a function that already implements our desired program (oracle function). For each figure, there is a single marker on the y-axis that shows exactly where the expert-case cutoff for the training set lies. Thus, the rows above the marker represent cases that humans determined to be important based on the problem definition. Brighter colors imply that a case is included more often; darker colors imply a lower number of inclusions.

For PushGP (Figure 3), we see that the configurations with sparse information often include the same cases in the down-sample as the runs with full information. This result means that by using a parent sampling rate of ρ = 0.01 and a case distance evaluation schedule parameter of k = 10, we can significantly reduce the number of evaluations needed to calculate distances between cases, while still maintaining a good approximation to the ground truth (full information, where we use all parents every generation to calculate distances). However, the composition for our sparse information runs is slightly noisier than that for full information, suggesting that parent sampling could introduce some extra stochasticity to the down-sample creation process.

For all studied benchmark problems, we see that IDS has a strong bias toward specific training cases that are included substantially more often in the down-sample. These selected training cases are mainly consistent with the human-defined edge cases that exist at the beginning of the training set. This result shows that informed down-sampling is indeed often finding the same cases to be informative as those that a human expert would, without any knowledge of the problem definition. However, with IDS, we can draw further comparisons of informativeness within this expert-defined group of cases, as some cases are selected more often than others within the first several cases.

[Figure 3 heat maps: rows are cases, columns are generations; Full Information and Sparse Information panels for Count Odds, Find Pair, Fizz Buzz, and Fuel Cost.]
Figure 3: Down-sample composition over generations for PushGP with 0.05 down-sample rate for a full information (ρ = 1 and k = 1) and a sparse information configuration (ρ = 0.01 and k = 10).

We then look at the labels of the specific training cases that are found to be important. We see that it makes sense for these training cases to be included more often than others in the down-samples.
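Composition plots of this kind are straightforward to reproduce from a logged inclusion-count matrix. The sketch below uses matplotlib; the function and its arguments are ours and are not part of either GP system, and the cutoff marker is one possible way to render the expert-case boundary described above.

```python
import numpy as np
import matplotlib.pyplot as plt

def plot_sample_composition(inclusion_counts, expert_case_cutoff, title, out_path):
    """Render a down-sample composition heat map.

    inclusion_counts: (n_cases, n_generations) array; entry [j, g] is how often case j
    appeared in the generation-g down-sample, averaged over active runs.
    expert_case_cutoff: index separating human-written edge cases (above) from the
    randomly generated cases (below).
    """
    counts = np.asarray(inclusion_counts, dtype=float)
    fig, ax = plt.subplots(figsize=(6, 4))
    im = ax.imshow(counts, aspect="auto", interpolation="nearest")   # brighter = included more often
    ax.axhline(expert_case_cutoff - 0.5, color="white", linewidth=1)  # expert-case cutoff marker
    ax.set_xlabel("Generations")
    ax.set_ylabel("Cases")
    ax.set_title(title)
    fig.colorbar(im, ax=ax, label="inclusion frequency")
    fig.savefig(out_path, dpi=150)
    plt.close(fig)
```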
Note that the labels of the specific training cases are not included in the plots for simplicity, but they can be queried based on their index in the data sets provided in our code implementation.

[Figure 3, continued: heat maps for GCD, Grade, Scrabble Score, and Small or Large, again with Full Information and Sparse Information panels.]

For example, for the Small or Large problem, cases around the decision boundaries as well as numbers between 0 and 1000 are included more often. For the Grade problem, the edge cases with very close decision boundaries are included, while the ones with far-away boundaries are not taken into account for the down-sample. For Fuel Cost, nearly all of the human-defined edge cases are found to be important, while for the GCD problem the first two cases in particular make it into nearly every down-sample and the rest are selected less often.

[Figure 4 heat maps: rows are cases, columns are generations; Full Information and Sparse Information panels for Count Odds, Find Pair, Fizz Buzz, and Fuel Cost.]
Figure 4: Down-sample composition over generations for G3P with 0.05 down-sample rate for a full information (ρ = 1 and k = 1) and a sparse information configuration (ρ = 0.01 and k = 10).

[Figure 4, continued: heat maps for GCD, Grade, Scrabble Score, and Small or Large.]

For the Scrabble Score problem, we see that the first edge cases, which specify the score for each letter, do not seem to be informative at all. This result is not surprising, as this information is already available to PushGP through a vector with these scores on the vector stack. However, the three edge cases after them, with empty strings and special characters as input, are included frequently. For Count Odds, the edge cases denoting empty lists, or lists with zero or a single odd number, were found to be important, indicating that those contain all the important information needed to learn what odd and even numbers are, as well as how to handle a list. For Fizz Buzz, all edge cases seem important, while for the Find Pair problem only those edge cases with lists of length 3 are consistently included. The lists of length 2 among the edge cases are represented in the down-sample less often.

Lastly, we see that the composition of the down-sample stays rather stable during the evolutionary run for the PushGP system, explaining why there is only a small difference in our experiments between calculating the distances every k = 1 and k = 100 generations (see Table 4).

For G3P (Figure 4), we see similar results as with PushGP. However, for the problems that require iterative structures to be solved (Count Odds, Find Pair), we see that the down-sample quickly dissolves into random noise instead of any form of structure. This dynamic occurs despite the fact that the same edge cases as with PushGP are initially identified in the first few generations.
This result is not surprising as finding it- +erative structures is known to be challenging for grammar-guided approaches, as such +structures are difficult to be built step-by-step guided by the performance on a set of +training cases. (Sobania and Rothlauf, 2020; Sobania et al., 2022b). Another difference +between the case compositions are that, while IDS with G3P tends to discover the same +cases as those found with PushGP, their use is less consistent, resulting in lines that +are more faint than those for PushGP. Both of these hypotheses could help explain the +relatively worse improvement that IDS yields for G3P than for PushGP. +However, for the problems that require conditionals, like Small or Large and Grade, +we see that the important cases are identified and used during evolution. This result is +also reflected in the success rates compared to random down-sampling (see Table 5). +Interestingly, IDS identifies many of the same cases as important for G3P as well as +PushGP. This result suggests that the structure of the problem itself determines which +cases are important rather than the considered representation. This dynamic makes +IDS potentially useful across many different systems and approaches. +6 +Conclusion and Future work +In this work, we proposed a novel approach to construct down-samples in an informed +manner during evolution when using down-sampled lexicase selection. We find that +changing the composition of down-samples to include cases that are more “informa- +tive” helps improve problem solving performance with a fixed computational bud- +get. Informativeness, we hypothesize, is linked to how distinct the cases in the down- +sample are. Cases that are solved by the same subset of the population are likely testing +for the same behavior, and thus need not be included in the down-sample at the same +time. Cases that test for different behaviors likely maintain different behavioral groups +of individuals, which could promote and maintain higher levels of diversity in the pop- +ulation. +In our empirical comparisons of these down-sampling methods, we find evidence +to support the conclusion that selecting cases in an informed manner increases the suc- +cess rate of GP runs. These results were confirmed across two independent GP systems +by using well studied benchmark problems. We find that using IDS often increases the +proportion of informative cases in the down-sample as verified by improved success +rates as well as by directly inspecting the content of the down-samples. IDS improves +upon the state of the art selection method across the majority of the program synthesis +problems explored in this work. +This work is a first exploration into changing the case composition of down- +22 +Preprint + +Informed Down-Sampled Lexicase Selection +samples for lexicase selection runs. As such, it opens many potential directions for +future research. Due to the modular nature of the informed down-sampling system, +different methods could be used for either the pairwise information measurement, or +for the down-sample creation portions of the algorithm. An exploration into differ- +ent down-sampling levels, and the effects levels have on the informational content of +down-samples is also a promising direction for future work. Additionally, IDS intro- +duces new hyperparameters for the parent sampling rate and generational schedule; +it would be beneficial to create a method for automatically setting these dependant on +the problem and the state of the GP search. 
Finally, even though there are reasons to +believe that IDS and down-sampling in general work well with lexicase selection, there +is nothing that ties them to a particular selection method; it may be informative to +explore the effects of IDS on other parent selection methods such as tournament selec- +tion. Finally, comparing the extent to which different down-sampling strategies blunt +lexicase’s ability to maintain specialists could also yield important insights into why +informed down-sampling improves success rates as much as it does. +7 +Acknowledgements +This material is based upon work supported by the National Science Foundation un- +der Grant No. 1617087. Any opinions, findings, and conclusions or recommendations +expressed in this publication are those of the authors and do not necessarily reflect the +views of the National Science Foundation. +This work was performed in part using high performance computing equipment +obtained under a grant from the Collaborative R&D Fund managed by the Mas- +sachusetts Technology Collaborative. +Parts of this research were conducted using the supercomputer Mogon and/or ad- +visory services offered by Johannes Gutenberg University Mainz (hpc.uni-mainz.de), +which is a member of the AHRP (Alliance for High Performance Computing in +Rhineland Palatinate, www.ahrp.info) and the Gauss Alliance e.V. +The authors would like to thank Anil Saini, Austin Ferguson, Cooper Sigrist, Con- +stantin Weiser, Edward Pantridge, Jose Hernandez, Li Ding and the Members of the +PUSH lab at Amherst College for discussions that helped shape this work. +References +Aenugu, S. and Spector, L. (2019). Lexicase selection in learning classifier systems. In Proceedings +of the Genetic and Evolutionary Computation Conference, GECCO ’19, page 356–364, New York, +NY, USA. Association for Computing Machinery. +Bachem, O., Lucic, M., and Krause, A. (2017). Practical coreset constructions for machine learn- +ing. arXiv: Machine Learning. +Boldi, R., Helmuth, T., and Spector, L. (2022). +Exploring Environmental Change for Down- +Sampled Lexicase Selection. volume Why it Didn’t Work-Shop of ALIFE 2022: The 2022 Con- +ference on Artificial Life. +Brindle, A. (1980). Genetic algorithms for function optimization. PhD thesis, University of Alberta. +Chrysakis, A. and Moens, M.-F. (2020). Online continual learning from imbalanced data. In III, +H. D. and Singh, A., editors, Proceedings of the 37th International Conference on Machine Learning, +volume 119 of Proceedings of Machine Learning Research, pages 1952–1961. PMLR. +Deb, K., Pratap, A., Agarwal, S., and Meyarivan, T. (2002). A fast and elitist multiobjective genetic +algorithm: NSGA-II. IEEE Transactions on Evolutionary Computation, 6(2):182–197. +Preprint +23 + +R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. Helmuth, F. Rothlauf, C. Ofria and L. Spector +Ding, L., Boldi, R., Helmuth, T., and Spector, L. (2022). Lexicase selection at scale. In Genetic +and Evolutionary Computation Conference Companion (GECCO ’22 Companion), July 9–13, 2022, +Boston, MA, USA. +Ding, L. and Spector, L. (2021). Optimizing neural networks with gradient lexicase selection. In +International Conference on Learning Representations. +Dolson, E. and Ofria, C. (2018). Ecological theory provides insights about evolutionary compu- +tation. In Proceedings of the Genetic and Evolutionary Computation Conference Companion, GECCO +’18, page 105–106, New York, NY, USA. Association for Computing Machinery. +Fagan, D., Fenton, M., and O’Neill, M. (2016). 
Fagan, D., Fenton, M., and O'Neill, M. (2016). Exploring position independent initialisation in grammatical evolution. In 2016 IEEE Congress on Evolutionary Computation (CEC), pages 5060-5067.

Fenton, M., McDermott, J., Fagan, D., Forstenlechner, S., Hemberg, E., and O'Neill, M. (2017). PonyGE2: Grammatical evolution in Python. In Proceedings of the Genetic and Evolutionary Computation Conference Companion, pages 1194-1201.

Ferguson, A. J., Hernandez, J. G., Junghans, D., Lalejini, A., Dolson, E., and Ofria, C. (2020). Characterizing the effects of random subsampling on lexicase selection. In Banzhaf, W., Goodman, E., Sheneman, L., Trujillo, L., and Worzel, B., editors, Genetic Programming Theory and Practice XVII, pages 1-23. Springer International Publishing, Cham.

Forstenlechner, S., Fagan, D., Nicolau, M., and O'Neill, M. (2017). A grammar design pattern for arbitrary program synthesis problems in genetic programming. In European Conference on Genetic Programming, pages 262-277. Springer.

Forstenlechner, S., Nicolau, M., Fagan, D., and O'Neill, M. (2016). Grammar design for derivation tree based genetic programming systems. In European Conference on Genetic Programming, pages 199-214. Springer.

Goings, S., Goldsby, H., Cheng, B. H., and Ofria, C. (2012). An ecology-based evolutionary algorithm to evolve solutions to complex problems. In Artificial Life 13, pages 171-177. MIT Press.

Helmuth, T. and Abdelhady, A. (2020). Benchmarking parent selection for program synthesis by genetic programming. In Proceedings of the 2020 Genetic and Evolutionary Computation Conference Companion, pages 237-238, Cancún, Mexico. ACM.

Helmuth, T. and Kelly, P. (2021). PSB2: The second program synthesis benchmark suite. In 2021 Genetic and Evolutionary Computation Conference, GECCO '21, Lille, France. ACM.

Helmuth, T. and Kelly, P. (2022). Applying genetic programming to PSB2: The next generation program synthesis benchmark suite. Genetic Programming and Evolvable Machines, 23(3):375-404.

Helmuth, T., McPhee, N. F., and Spector, L. (2016). Effects of lexicase and tournament selection on diversity recovery and maintenance. In Proceedings of the 2016 on Genetic and Evolutionary Computation Conference Companion, GECCO '16 Companion, pages 983-990, New York, NY, USA. Association for Computing Machinery.

Helmuth, T., McPhee, N. F., and Spector, L. (2018). Program synthesis using uniform mutation by addition and deletion. In Proceedings of the Genetic and Evolutionary Computation Conference, GECCO '18, pages 1127-1134, New York, NY, USA. Association for Computing Machinery.

Helmuth, T., Pantridge, E., and Spector, L. (2020). On the importance of specialists for lexicase selection. Genetic Programming and Evolvable Machines, 21(3):349-373.

Helmuth, T. and Spector, L. (2015). General program synthesis benchmark suite. In GECCO '15: Proceedings of the 2015 Conference on Genetic and Evolutionary Computation, pages 1039-1046, Madrid, Spain. ACM.

Helmuth, T. and Spector, L. (2020). Explaining and exploiting the advantages of down-sampled lexicase selection. In Artificial Life Conference Proceedings, pages 341-349. MIT Press.

Helmuth, T. and Spector, L. (2021). Problem-solving benefits of down-sampled lexicase selection. Artificial Life, pages 1-21.

Helmuth, T., Spector, L., and Matheson, J. (2015). Solving uncompromising problems with lexicase selection. IEEE Transactions on Evolutionary Computation, 19(5):630-643.

Hernandez, J. G., Lalejini, A., Dolson, E., and Ofria, C. (2019). Random subsampling improves performance in lexicase selection. In GECCO '19: Proceedings of the Genetic and Evolutionary Computation Conference Companion, pages 2028-2031, Prague, Czech Republic. ACM.

Hernandez, J. G., Lalejini, A., and Ofria, C. (2022). An Exploration of Exploration: Measuring the Ability of Lexicase Selection to Find Obscure Pathways to Optimality. In Banzhaf, W., Trujillo, L., Winkler, S., and Worzel, B., editors, Genetic Programming Theory and Practice XVIII, pages 83-107. Springer Nature Singapore, Singapore.

Hochbaum, D. S. and Shmoys, D. B. (1985). A best possible heuristic for the k-center problem. Mathematics of Operations Research, 10:180-184.

Holland, J. H. (1992). Adaptation in Natural and Artificial Systems: An Introductory Analysis with Applications to Biology, Control and Artificial Intelligence. MIT Press, Cambridge, MA, USA.

Horn, J., Nafpliotis, N., and Goldberg, D. (1994). A niched Pareto genetic algorithm for multiobjective optimization. In Proceedings of the First IEEE Conference on Evolutionary Computation, IEEE World Congress on Computational Intelligence, pages 82-87, Orlando, FL, USA. IEEE.

Krawiec, K., Swan, J., and O'Reilly, U.-M. (2016). Behavioral Program Synthesis: Insights and Prospects, pages 169-183. Springer International Publishing, Cham.

La Cava, W., Spector, L., and Danai, K. (2016). Epsilon-lexicase selection for regression. In Proceedings of the Genetic and Evolutionary Computation Conference 2016, GECCO '16, pages 741-748, New York, NY, USA. Association for Computing Machinery.

Lalejini, A., Dolson, E., Vostinar, A. E., and Zaman, L. (2022). Artificial selection methods from evolutionary computing show promise for directed evolution of microbes. eLife, 11:e79665.

Loshchilov, I. and Hutter, F. (2015). Online batch selection for faster training of neural networks. arXiv, abs/1511.06343.

Metevier, B., Saini, A. K., and Spector, L. (2019). Lexicase selection beyond genetic programming. In Banzhaf, W., Spector, L., and Sheneman, L., editors, Genetic Programming Theory and Practice XVI, pages 123-136. Springer International Publishing, Cham.

Moore, J. M. and Stanton, A. (2017). Lexicase selection outperforms previous strategies for incremental evolution of virtual creature controllers. In Knibbe, C., Beslon, G., Parsons, D. P., Misevic, D., Rouzaud-Cornabas, J., Bredèche, N., Hassas, S., Simonin, O., and Soula, H., editors, Proceedings of the Fourteenth European Conference on Artificial Life, ECAL 2017, Lyon, France, September 4-8, 2017, pages 290-297. MIT Press.

Paul, M., Ganguli, S., and Dziugaite, G. K. (2021). Deep learning on a data diet: Finding important examples early in training. Advances in Neural Information Processing Systems, 34:20596-20607.

Ruder, S. (2017). An overview of gradient descent optimization algorithms. arXiv:1609.04747 [cs].

Ryan, C., Collins, J. J., and Neill, M. O. (1998). Grammatical evolution: Evolving programs for an arbitrary language. In European Conference on Genetic Programming, pages 83-96. Springer.

Schmidt, M. and Lipson, H. (2005). Co-evolution of fitness maximizers and fitness predictors. In Rothlauf, F., editor, Late Breaking Paper at the Genetic and Evolutionary Computation Conference (GECCO'2005), Washington, D.C., USA.

Schmidt, M. D. and Lipson, H. (2008). Coevolution of fitness predictors. IEEE Transactions on Evolutionary Computation, 12:736-749.

Smith, R. E., Forrest, S., and Perelson, A. S. (1993). Population diversity in an immune system model: Implications for genetic search. In Whitley, L. D., editor, Foundations of Genetic Algorithms, volume 2, pages 153-165. Elsevier.

Sobania, D., Briesch, M., and Rothlauf, F. (2022a). Choose your programming copilot: A comparison of the program synthesis performance of GitHub Copilot and genetic programming. In Proceedings of the Genetic and Evolutionary Computation Conference, pages 1019-1027.

Sobania, D. and Rothlauf, F. (2020). Challenges of program synthesis with grammatical evolution. In European Conference on Genetic Programming (Part of EvoStar), pages 211-227. Springer.

Sobania, D. and Rothlauf, F. (2022). Program synthesis with genetic programming: The influence of batch sizes. In Genetic Programming: 25th European Conference, EuroGP 2022, Held as Part of EvoStar 2022, Madrid, Spain, April 20-22, 2022, Proceedings, pages 118-129, Berlin, Heidelberg. Springer-Verlag.

Sobania, D., Schweim, D., and Rothlauf, F. (2022b). A comprehensive survey on program synthesis with evolutionary algorithms. IEEE Transactions on Evolutionary Computation.

Spector, L. (2012). Assessment of problem modality by differential performance of lexicase selection in genetic programming: A preliminary report. In Proceedings of the 14th Annual Conference Companion on Genetic and Evolutionary Computation, GECCO '12, pages 401-408, New York, NY, USA. Association for Computing Machinery.

Spector, L., Perry, C., Klein, J., and Keijzer, M. (2004). Push 3.0 programming language description. Technical Report HC-CSTR-2004-02, School of Cognitive Science, Hampshire College, USA.

Spector, L. and Robinson, A. (2002). Genetic programming and autoconstructive evolution with the Push programming language. Genetic Programming and Evolvable Machines, 3(1):7-40.

Troise, S. A. and Helmuth, T. (2017). Lexicase selection with weighted shuffle. In Banzhaf, W., Olson, R. S., Tozier, W., and Riolo, R., editors, Genetic Programming Theory and Practice XV, Genetic and Evolutionary Computation, pages 89-104, University of Michigan in Ann Arbor, USA. Springer.

Vanneschi, L., Castelli, M., and Silva, S. (2014). A survey of semantic methods in genetic programming. Genetic Programming and Evolvable Machines, 15(2):195-214.

Whigham, P. A. et al. (1995). Grammatically-based genetic programming. In Proceedings of the Workshop on Genetic Programming: From Theory to Real-World Applications, volume 16, pages 33-41. Citeseer.

Zogaj, F., Cambronero, J. P., Rinard, M. C., and Cito, J. (2021). Doing more with less: Characterizing dataset downsampling for AutoML. Proceedings of the VLDB Endowment, 14(11):2059-2072.

Šikulová, M. and Sekanina, L. (2012). Coevolution in Cartesian Genetic Programming. In Moraglio, A., Silva, S., Krawiec, K., Machado, P., and Cotta, C., editors, Genetic Programming, Lecture Notes in Computer Science, pages 182-193, Berlin, Heidelberg. Springer.

A Generalization Rates

Table 6: Generalization rate for PushGP. These data indicate the proportion of the runs that passed the training set that also passed the held-out test set.
Method            Lex  | Rnd    IDS    IDS    IDS    IDS   | Rnd    IDS    IDS    IDS    IDS
r                  -   | 0.05   0.05   0.05   0.05   0.05  | 0.1    0.1    0.1    0.1    0.1
rho                -   | -      1      0.01   0.01   0.01  | -      1      0.01   0.01   0.01
k                  -   | -      1      1      10     100   | -      1      1      10     100
Count Odds        1.00 | 0.96   0.98   0.99   1.00   0.99  | 0.96   1.00   0.98   0.99   0.99
Find Pair         1.00 | 0.82   0.82   0.73   0.74   0.80  | 0.50   0.88   0.79   0.68   0.75
Fizz Buzz         0.93 | 0.96   1.00   0.93   0.95   0.99  | 1.00   1.00   0.96   0.96   0.96
Fuel Cost         1.00 | 1.00   1.00   0.99   0.99   0.99  | 1.00   1.00   1.00   1.00   1.00
GCD               0.91 | 0.93   1.00   0.93   0.83   0.87  | 0.82   0.75   0.80   0.89   0.87
Grade              -   | -      -      -      1.00   -     | 1.00   -      -      1.00   1.00
Scrabble Score    1.00 | 1.00   1.00   1.00   1.00   1.00  | 1.00   1.00   0.98   1.00   1.00
Small or Large    0.71 | 0.95   0.80   0.78   0.74   0.71  | 0.81   0.77   0.69   0.73   0.64

Table 7: Generalization rate for G3P. These data indicate the proportion of the runs that passed the training set that also passed the held-out test set.

Method            Lex  | Rnd    IDS    IDS    IDS    IDS   | Rnd    IDS    IDS    IDS    IDS
r                  -   | 0.05   0.05   0.05   0.05   0.05  | 0.1    0.1    0.1    0.1    0.1
rho                -   | -      1      0.01   0.01   0.01  | -      1      0.01   0.01   0.01
k                  -   | -      1      1      10     100   | -      1      1      10     100
Count Odds        0.94 | 0.96   0.96   0.88   1.00   0.96  | 1.00   0.92   0.95   0.91   0.95
Find Pair          -   | -      -      1.00   -      -     | 1.00   -      -      1.00   -
Fizz Buzz         0.79 | 0.87   0.85   0.84   0.78   0.85  | 0.83   0.82   0.82   0.89   0.73
Fuel Cost         1.00 | 0.97   1.00   0.97   0.96   1.00  | 1.00   0.96   0.96   1.00   1.00
GCD                -   | 0.17   -      -      -      0.25  | -      -      -      -      -
Grade             0.42 | 0.45   0.50   0.53   0.59   0.45  | 0.47   0.54   0.47   0.54   0.49
Scrabble Score    1.00 | 1.00   1.00   1.00   0.92   0.83  | 1.00   -      0.86   1.00   0.60
Small or Large    0.47 | 0.57   0.65   0.56   0.64   0.66  | 0.68   0.59   0.60   0.579  0.65

diff --git a/AtAzT4oBgHgl3EQfhv1C/content/tmp_files/load_file.txt b/AtAzT4oBgHgl3EQfhv1C/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..65af7f0c67eca0ee6388ad0d6689f9be0681b3fd
--- /dev/null
+++ b/AtAzT4oBgHgl3EQfhv1C/content/tmp_files/load_file.txt
@@ -0,0 +1,1323 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf,len=1322
Informed Down-Sampled Lexicase Selection: Identifying productive training cases for efficient problem solving

Ryan Boldi∗ (rbahlousbold@umass.edu), University of Massachusetts, Amherst, MA 01003, USA
Martin Briesch∗ (briesch@uni-mainz.de), Johannes Gutenberg University, Mainz, 55128, Germany
Dominik Sobania (dsobania@uni-mainz.de), Johannes Gutenberg University, Mainz, 55128, Germany
Alexander Lalejini (lalejina@gvsu.edu), Grand Valley State University, Allendale, MI 49401, USA
Thomas Helmuth (thelmuth@hamilton.edu), Hamilton College, Clinton, NY, 13323, USA
Franz Rothlauf (rothlauf@uni-mainz.de), Johannes Gutenberg University, Mainz, 55128, Germany
Charles Ofria (ofria@msu.edu), Michigan State University, East Lansing, MI 48824, USA
Lee Spector (lspector@amherst.edu), Amherst College, Amherst, MA 01002, USA

Abstract
Genetic Programming (GP) often uses large training sets and requires all individuals to be evaluated on all training cases during selection. Random down-sampled lexicase selection evaluates individuals on only a random subset of the training cases allowing for more individuals to be explored with the same amount of program executions. However, creating a down-sample randomly might exclude important cases from the current down-sample for a number of generations, while cases that measure the same behavior (synonymous cases) may be overused despite their redundancy. In this work, we introduce Informed Down-Sampled Lexicase Selection. This method leverages population statistics to build down-samples that contain more distinct and therefore informative training cases. Through an empirical investigation across two different GP systems (PushGP and Grammar-Guided GP), we find that informed down-sampling significantly outperforms random down-sampling on a set of contemporary program synthesis benchmark problems. Through an analysis of the created down-samples, we find that important training cases are included in the down-sample consistently across independent evolutionary runs and systems. We hypothesize that this improvement can be attributed to the ability of Informed Down-Sampled Lexicase Selection to maintain more specialist individuals over the course of evolution, while also benefiting from reduced per-evaluation costs.

Keywords: Genetic programming, parent selection algorithms, selection schemes, lexicase selection, down-sampling, informed down-sampling

∗Both authors contributed equally.

©2022 by the Massachusetts Institute of Technology. Preprint.
arXiv:2301.01488v1 [cs.NE] 4 Jan 2023

1 Introduction

In Evolutionary Computation, we often use large sets of training data to evaluate the quality of candidate solutions. For instance, most Genetic Programming (GP) systems evaluate programs using input/output examples (training cases) that specify the expected behavior of a correct program. Many GP selection strategies aggregate each program's performance across all training cases to produce one fitness score that can be used for selection. In contrast, lexicase selection (Spector, 2012; Helmuth et al., 2015) avoids aggregation and considers each training case separately, which has been shown to improve diversity maintenance (Helmuth et al., 2016; Dolson and Ofria, 2018) and problem-solving success across a wide range of domains (Moore and Stanton, 2017; Metevier et al., 2019; Aenugu and Spector, 2019; Ding and Spector, 2021; Lalejini et al., 2022).
However, standard lexicase selection has the drawback that we have to evaluate all individuals on all training cases, which can be computationally expensive when evaluation is non-trivial. To reduce lexicase selection's computational cost, recent work introduced down-sampled lexicase selection (Moore and Stanton, 2017; Hernandez et al., 2019; Ferguson et al., 2020). In down-sampled lexicase selection, the training set is randomly down-sampled, reducing the number of test case evaluations required to assess the quality of each candidate solution. This in turn reduces the cost of evaluating an entire set of individuals, allowing us to reallocate computational resources to other aspects of an evolutionary search (e.g., increasing search time or population size), which can substantially improve problem-solving success (Helmuth and Spector, 2020, 2021; Hernandez et al., 2019). However, a naive random down-sample can leave out potentially important test cases, resulting in a loss of diversity (Ferguson et al., 2020; Helmuth et al., 2020; Hernandez et al., 2022).
In order to put more computational effort towards evaluating individuals on important training cases, we propose informed down-sampling (IDS), which uses runtime population statistics to build a down-sample that contains more distinct cases. Given a set of solutions, two training cases are distinct from each other if the subsets of solutions that solve each of the two test cases have little-to-no overlap. Two training cases are synonymous if the opposite is true: there is substantial overlap between the subsets of solutions that solve each case*. Consequently, informed down-sampling favors the distinct training cases over synonymous cases when building a down-sample to use for selection. We expect these informed down-samples to better maintain unique individuals, increasing overall population diversity while also putting more selection pressure on individuals whose descendants are more likely to solve the problem. These unique individuals are often viewed as the stepping-stones for evolution to use in finding a perfect solution program (Helmuth et al., 2020).

* Synonymous cases can also be thought of as cases that have different inputs and outputs yet measure a very similar functionality such that there is a high correlation between individuals' performance on these cases.

To assess the performance of Informed Down-Sampled Lexicase Selection, we compare lexicase selection without down-sampling (standard lexicase), with random down-sampling, and with informed down-sampling across eight problems from the first and second program synthesis benchmark suites (Helmuth and Spector, 2015; Helmuth and Kelly, 2021). We conduct our experiments in two independent GP frameworks, Grammar-Guided Genetic Programming (G3P) (Whigham et al., 1995; Forstenlechner et al., 2016, 2017) and PushGP (Spector and Robinson, 2002; Spector et al., 2004). We find that building a down-sample based on information we collect from the population is a valuable way to improve the success rates of evolutionary runs at a fixed computational cost. Furthermore, simply tracking which cases are distinct, and ensuring they are placed in a down-sample, can significantly improve problem solving performance. Our results provide evidence that informed down-sampling improves the success rate of search in the two GP systems used. By analyzing the composition of down-samples, we also verify that informed down-sampling builds down-samples that contain more informative test cases (i.e. edge cases) than random down-sampling.

2 Related Work

In most GP applications, parent selection uses the performance of candidate solutions on a set of training cases to pick individuals that contribute genetic material to the next generation.
Most selection algorithms aggregate the scores on these training cases to get a single score per candidate and then select the most fit candidates using tournament selection (Brindle, 1980), implicit fitness sharing (Smith et al., 1993), fitness proportionate selection (Holland, 1992), or another selection strategy. The fitness aggregation procedure for these methods often results in a loss of semantic information about which training cases the individual performs well on (Krawiec et al., 2016), motivating the development of selection strategies that consider each individual's performance on all training cases encountered (Vanneschi et al., 2014; Goings et al., 2012; Deb et al., 2002; Horn et al., 1994).
In contrast, lexicase selection does not aggregate fitness or performance measures (Spector, 2012). For each parent selection event, the lexicase selection procedure first places all individuals in the population into a "parent pool" (i.e., the pool of individuals eligible to be selected). To select a parent, lexicase selection shuffles the training cases into a random ordering, and each training case is considered in sequence. For each training case, the parent pool is filtered down to just the individuals that have the best (or tie for the best) performance, removing all but the best candidates from further consideration. If there is only one individual that remains in the pool during this filtering process, this individual is selected. If the training cases are exhausted and there are still individuals in the pool, one of these individuals is selected at random.
Meanwhile, many variants of lexicase selection have been proposed for use in different problems or domains. For example, epsilon lexicase selection (La Cava et al., 2016; Moore and Stanton, 2017), batch lexicase selection (Aenugu and Spector, 2019; Sobania and Rothlauf, 2022), gradient lexicase selection (Ding and Spector, 2021), lexicase selection for GAs (Metevier et al., 2019), weighted shuffle lexicase selection (Troise and Helmuth, 2017), and fast lexicase selection (Ding et al., 2022).
One of the most promising variants of lexicase selection is down-sampled lexicase selection, which was first proposed for expensive evolutionary robotics runs by Moore and Stanton (2017) and later formalized by Hernandez et al. (2019) for GP runs. So far, down-sampled lexicase selection has increased the success and generalization rates for a variety of problems (Ferguson et al., 2020). Down-sampled lexicase selection works by randomly sampling the training set once in each generation to create a smaller set of cases. These cases are then used to perform all selection events in the population for that one generation. This limitation on the number of test cases reduces the computational costs of evaluating the individuals, which is usually one of the most expensive operations in evolutionary runs. These savings could be used to perform computationally cheaper GP runs, increase the population size, or run evolution for more generations. Down-sampled lexicase selection has also been found to significantly outperform regular lexicase selection in a variety of program synthesis benchmarks (Hernandez et al., 2019; Ferguson et al., 2020; Helmuth and Spector, 2020, 2021; Helmuth and Abdelhady, 2020). However, creating a down-sample randomly can exclude important training cases from the current down-sample for a number of generations (Hernandez et al., 2022), while synonymous cases may be overused.
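To make the procedure concrete, a minimal Python sketch of lexicase selection with a per-generation random down-sample is given below. It is illustrative only (simplified, and not the implementation used in any of the systems discussed here); the error table indexed by individual and case is an assumption made purely for the example.

import random

def random_down_sample(all_cases, rate):
    # Sample the per-generation down-sample used for every selection event.
    k = max(1, int(rate * len(all_cases)))
    return random.sample(all_cases, k)

def lexicase_select(population, cases, errors):
    # population: list of individual ids
    # cases: training cases used for this selection event (e.g. a down-sample)
    # errors[ind][case]: error of individual `ind` on `case` (0 means solved)
    pool = list(population)
    for case in random.sample(cases, len(cases)):   # random case ordering
        best = min(errors[ind][case] for ind in pool)
        pool = [ind for ind in pool if errors[ind][case] == best]
        if len(pool) == 1:
            return pool[0]
    return random.choice(pool)   # cases exhausted: pick randomly among survivors

# One generation of down-sampled lexicase parent selection:
# down_sample = random_down_sample(all_cases, rate=0.05)
# parents = [lexicase_select(population, down_sample, errors)
#            for _ in range(len(population))]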
As a first attempt at changing the composition of cases in the down-sample, Boldi et al. (2022) explored using a rolling down-sample and a disjoint down-sample for lexicase selection runs. While the results were neutral-if-not-negative, they highlighted the presence of synonymous cases in practice and suggest that an attempt at mediating the time put into evaluating individuals on these synonymous cases might improve search performance.
Work in the EC literature that is related to informed down-sampling primarily includes the co-evolution of fitness predictors and maximizers (Schmidt and Lipson, 2005, 2008; Šikulová and Sekanina, 2012). That work attempts to evolve a smaller set of training cases, or fitness predictors, to evaluate the fitness of individuals instead of using the entire training set. While our studied methods do not involve co-evolution, they both result in a compressed training set that is roughly as informative as the set of all available data. Another example is the use of random down-sampling to improve performance of AutoML runs that use Genetic Programming (Zogaj et al., 2021). In the broader machine learning community, random down-sampling is used to generate mini-batches for stochastic gradient descent (Ruder, 2017), and forms of non-random down-sampling are used to detect hard or informative parts of the training data (Loshchilov and Hutter, 2015; Bachem et al., 2017; Paul et al., 2021; Chrysakis and Moens, 2020).
3 Informed Down-Sampling

Informed down-sampling addresses randomly down-sampled lexicase's drawback of sometimes including many synonymous training cases in a down-sample, which is computationally inefficient and can result in a failure to accurately assess candidate solution quality. For example, down-sampled lexicase selection might fail to select candidate solutions that specialize on training cases absent from a particular random down-sample, resulting in the loss of potentially important genetic material from the population. Instead of down-sampling randomly, informed down-sampling creates down-samples composed of more distinct training cases than a random sample would contain using runtime population statistics. As a result, we expect informed down-sampling lexicase selection to maintain more diverse populations, while reducing computation spent on evaluating individuals on synonymous training cases.
We suggest two methods of building an informed down-sample. First, we explore the idealized effectiveness of informed down-sampling by presenting it with full information. This method requires evaluating the entire population on all training cases, performing the same number of program executions per generation as normal lexicase selection. Therefore, informed down-sampling with full information cannot capitalize on the computational savings afforded by random down-sampling. However, the full information approach provides useful intuition for building an informed down-sample, allowing us to measure the problem-solving success of our sampling approach under idealized conditions.
Next, we present an approach for creating an informed down-sample that reduces the number of per-generation evaluations required for selection (relative to standard lexicase selection). This second approach, referred to as the "sparse information" approach, estimates the distinctness of training cases based on a sample of individuals from the parent population. Indeed, building an informed down-sample using sparse information results in nearly the same per-generation evaluation savings as when using random down-sampling.

       I1  I2  I3  I4  I5  I6
S1  [  0   1   0   1   1   0 ]
S2  [  1   1   0   0   1   1 ]
S3  [  1   0   1   1   0   1 ]
S4  [  0   1   0   0   1   1 ]
S5  [  0   1   0   1   1   0 ]

Figure 1: Example of the data structure that is used to determine distances between cases. c_{1,...,5} are cases, with their respective solve vectors S_{1,...,5}, and I_{1,...,6} are individuals. The entry at S_j and I_i represents whether the i-th individual solved the j-th test case or not. The binary solve vectors S_j can be read off as the respective row for the j-th case. The distance between two cases, D(c_x, c_y), is the Hamming distance between their respective solve vectors. For example, D(c1, c2) = 3 and D(c2, c3) = 4.

3.1 Building an Informed Down-Sample with Full Information

In our informed down-sampling approach with full information, we create one down-sample of training cases per generation, and we use candidate solution performances on only the sampled training cases to choose parents with lexicase selection. To construct an informed down-sample with full information, we evaluate all members of the population on all training cases. In this work, each of these evaluations is on a pass/fail basis. Next, we construct the "solve vector" S_j for each training case c_j, which is a vector of binary values that specifies which individuals in the population have solved the training case. We then calculate the Hamming distance between solve vectors for all pairs of training cases, allowing us to measure how distinct training cases are relative to one another. We begin constructing the down-sample by randomly selecting an initial training case to include. Then we find the training case whose solve vector is maximally distant from the closest training case already included in the down-sample, and add it to the down-sample. We repeatedly add training cases to the down-sample in this way until reaching a parameterized sample size.
Figure 1 provides an example set of binary solve vectors for a set of five training cases and a population of six individuals. The columns in this matrix I_i describe the performance of the i-th individual on all cases. A value of 1 at (I_i, c_j) implies that the i-th individual solved the j-th test case (error = 0), or S_j^i = 1. Since all members of a population of size p are evaluated on all test cases (at least initially), we can say that ∥S_j∥ = p for all cases c_j. Thus, the number of columns corresponds to the population size.
We define the distance between two training cases as D(c_x, c_y) := Hamming(S_x, S_y), where Hamming(·, ·) is the Hamming distance between two vectors. For binary vectors, the distance function is defined as D(c_x, c_y) = Σ_{i=1}^{p} |S_x^i − S_y^i|. Thus, two training cases that are solved by the same set of individuals are deemed to have D(c1, c2) = 0 and are called "synonymous cases". For example, for the cases in Figure 1, c1 and c5 have identical solve vectors, and therefore are synonymous (D(c1, c5) = 0).
We think of this distance function as indicating the joint information contained in a pair of cases. Two cases that have exactly the same individuals solving them (i.e. are synonymous) have little to no joint information because having both of the cases in the sample would be about as informative as just having one of them. Two cases that have a high distance from each other, due to being solved by different subsets of the population, have high joint information as each case is responsible for informing the system about the performance of one set of individuals. Having both of these cases, as opposed to one alone, would be a more faithful approximation of using the full training set.
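To make the distance measure concrete, the following minimal Python sketch (illustrative only, not code from our experiments) computes the pairwise case distances for the solve vectors of Figure 1; the values D(c1, c2) = 3, D(c2, c3) = 4, and D(c1, c5) = 0 quoted above fall out directly.

import numpy as np

# Solve vectors from Figure 1: rows are cases c1..c5, columns are individuals I1..I6.
# A 1 means the individual solves (has zero error on) that training case.
S = np.array([
    [0, 1, 0, 1, 1, 0],   # S1
    [1, 1, 0, 0, 1, 1],   # S2
    [1, 0, 1, 1, 0, 1],   # S3
    [0, 1, 0, 0, 1, 1],   # S4
    [0, 1, 0, 1, 1, 0],   # S5
])

def case_distances(solve_matrix):
    # Pairwise Hamming distances D(c_x, c_y) between the case solve vectors.
    diff = solve_matrix[:, None, :] != solve_matrix[None, :, :]
    return diff.sum(axis=2)

D = case_distances(S)
print(D[0, 1])   # D(c1, c2) = 3
print(D[1, 2])   # D(c2, c3) = 4
print(D[0, 4])   # D(c1, c5) = 0, so c1 and c5 are synonymous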
Once we have a method to evaluate the pairwise distance between two cases, we can use it to select a down-sample of the training set for use in the current generation. In this work, we apply a variant of Farthest First Traversal to select the down-sample (Hochbaum and Shmoys, 1985). The creation of the down-sample starts with the selection of one random case to include. Then, at each step, we scan each unselected test case and measure its minimum distance to any case in the current down-sample. We select the case that has the largest minimum distance. In other words, we successively add the test case that is furthest from the current down-sample at its nearest point.

Our Farthest First Traversal algorithm is shown in Algorithm 1. Starting with an empty down-sample, we first add a random case to the down-sample (line 4), and then iteratively add the cases that are maximally far from the closest case to them (lines 5-9). If there are multiple cases with the same maximum minimum distance, ties are broken randomly. The MinDist_i value stores the distance from a given case c_i to the closest case to it in the down-sample. The cases.popMaxMinDistCase() function removes and returns the case in cases that has the maximum value of MinDist_i. Note that the minimum distances often all go to zero at some point during down-sample formation; at this point, every case left over in the training set has a synonymous case in the down-sample already.
When this happens, the farthest first procedure will automatically select cases at random from the training set to fill up the required down-sample size. Figure 2 shows an example of performing informed down-sampling with full information using the case solve vectors from Figure 1.

Algorithm 1: Farthest First Traversal Down-Sample Selection
Data: D(·,·) with D(c_i, c_j) = D(c_j, c_i) = distance from case i to case j; r = down-sample rate
 1: cases ← set of all cases in training set
 2: ds ← empty set                       ▷ the down-sample
 3: size ← r × |cases|                   ▷ desired size of down-sample
 4: ds.add(cases.popRandomCase())
 5: while |ds| < size do
 6:     for every case c_i in cases do
 7:         MinDist_i ← minimum distance from c_i to any case in ds
 8:     end for
 9:     ds.add(cases.popMaxMinDistCase())
10: end while
11: return ds
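The loop in Algorithm 1 can be sketched in a few lines of Python. This is an illustrative re-implementation under our own naming, not the authors' propeller or PonyGE2 code, exercised here on the distance matrix given in Figure 2.

import random

def farthest_first_downsample(D, r, rng=random):
    """Greedy farthest-first traversal over training cases.
    D[x][y] is the Hamming distance between the solve vectors of cases x and y;
    r is the down-sample rate. Ties in the maximum minimum distance are broken randomly."""
    remaining = list(range(len(D)))
    size = max(1, round(r * len(remaining)))
    # Start from one randomly chosen case.
    ds = [remaining.pop(rng.randrange(len(remaining)))]
    while len(ds) < size and remaining:
        # Distance from each unselected case to its nearest selected case.
        min_dists = [min(D[c][s] for s in ds) for c in remaining]
        best = max(min_dists)
        pick = rng.choice([i for i, d in enumerate(min_dists) if d == best])
        ds.append(remaining.pop(pick))
    return ds

# Distance matrix from Figure 2 (cases c1..c5), down-sample rate r = 3/5.
D = [[0, 3, 4, 2, 0],
     [3, 0, 4, 1, 3],
     [4, 4, 0, 5, 5],
     [2, 1, 5, 0, 2],
     [0, 3, 5, 2, 0]]
print(farthest_first_downsample(D, r=3/5))  # e.g. [0, 2, 1] (c1, c3, c2) when c1 is drawn first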
Figure 2: Example running procedure of informed down-sampling with full information to pick a down-sample of size 3 (i.e. r = 3/5). We have a tabular representation of the distance function D, generated by computing the Hamming distance between each pair of cases' solve vectors:

  D =       c1  c2  c3  c4  c5
      c1  [  0   3   4   2   0 ]
      c2  [  3   0   4   1   3 ]
      c3  [  4   4   0   5   5 ]
      c4  [  2   1   5   0   2 ]
      c5  [  0   3   5   2   0 ]

Beginning with a randomly selected case, ds = {c1}, we sequentially add the cases that are at the maximum distance to their closest case in the down-sample: c3 has the maximum distance to c1, giving ds = {c1, c3}; c2 then has the maximum minimum distance to {c1, c3}, giving ds = {c1, c3, c2}. The first step is simply finding the case (c3) in the training set with the maximum distance to c1. To select the next case, we need to find, for c2, c4 and c5, which of c1 and c3 is closest to them, respectively, and then which of those cases is farthest away. In this example, c2 was added as it had a higher distance (3) to its closest case than did c4 or c5 (2 and 0, respectively). Notice that the cases that were left out, c4 and c5, are synonymous or nearly synonymous with cases already in the down-sample: c2 and c1, respectively.

3.2 Building an Informed Down-Sample with Sparse Information

Down-sampled lexicase selection's problem-solving benefits stem from the computational savings gained by not evaluating the entire population on the whole training set every generation. For a fixed computational budget, down-sampling allows more computational resources to be allocated to other aspects of evolutionary search, such as running for more generations or increasing the population size. As a result, a larger portion of the search space can be explored (Helmuth and Spector, 2021). Informed down-sampling with full information, however, requires the evaluation of all individuals on all training cases in order to construct the down-sample used in selection. This entire process is counterproductive, as we could have just used the initial population evaluation to select individuals and circumvented the down-sampling process altogether. The benefit of down-sampling comes from its ability to use sparse information in the individual selection process. Since our aim is to improve on random down-sampling, we must reduce the number of program executions needed to calculate distances between training cases, so that we benefit from sparse evaluations both in our individual selections and in our down-sample creation. We present two methods to decrease the number of evaluations required for the pairwise distance calculation procedure.
The first method, parent sampling, samples a proportion ρ of the parents to evaluate the case distances every generation. These parent samples are evaluated on the entire training set. In our runs with a population size of 1000, if we were to randomly sample 0.01 of these parents (ρ = 0.01) to become the parent sample, these 10 parents would be evaluated on all training cases. This results in case solve vectors of length 10 that are used to calculate the distances between cases. Distances between cases are determined purely from these parent-sample evaluations, and we use the resulting distance matrix to estimate the joint informativeness of the cases.

The second method, scheduled case distance computation, involves recomputing the distance matrix from the current population every k generations, as opposed to every generation. This schedule reduces the amount of computation required for the evaluation of case distances even further by not performing it every generation.
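As a rough sketch of parent sampling, the following function (our own naming; evaluate_pass is a hypothetical callback standing in for running one individual on one training case) estimates the case distance matrix from a small random sample of parents.

import random
from typing import Callable, List, Sequence

def estimate_case_distances(parents: Sequence, cases: Sequence, rho: float,
                            evaluate_pass: Callable[[object, object], bool],
                            rng=random) -> List[List[int]]:
    """Estimate pairwise case distances from a parent sample of size rho * |parents|.
    evaluate_pass(individual, case) should return True when the individual solves the case."""
    n_sample = max(1, int(rho * len(parents)))
    sample = rng.sample(list(parents), n_sample)
    # Solve vector per case, restricted to the sampled parents (length n_sample).
    solve = [[evaluate_pass(ind, c) for ind in sample] for c in cases]
    # Hamming distance between the (short) solve vectors of every pair of cases.
    return [[sum(a != b for a, b in zip(solve[x], solve[y]))
             for y in range(len(cases))] for x in range(len(cases))]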
While such a schedule does not update the distances between cases as often, we still re-select the down-sample based on these distances every generation. Due to the stochastic nature of the down-sample selection process (specifically the random selection of the first case), it is unlikely that the same down-sample will be used to evaluate the population in consecutive generations. In combination, parent sampling and scheduled case distance computation allow us to select a down-sample using far less information about individuals, while losing only a small amount of information about cases and their similarity. This technique enables informed down-sampling to explore nearly as many individuals as random down-sampling does. Putting it all together, informed down-sampling with sparse information is detailed in Algorithm 2, which walks through a single generation's selection events and returns the parents for the next generation.

Algorithm 2: Informed Down-Sampling with Sparse Information
Data: P: population; cases: set of all training cases; k: scheduled case distance computation parameter; ρ: parent sampling rate; G: current generation counter; D: case distance matrix (all distances initialized to be maximally far)
Result: A list of selected parents
1: if G % k == 0 then
2:     P̂ ← sample ρ × |P| parents from P
3:     evaluate P̂ on cases
4:     calculate D from the case solve vectors of the solutions in P̂ on cases
5: end if
6: D(·,·) ← distance function derived by indexing into D
7: ds ← create down-sample using farthest first traversal down-sampling (see Algorithm 1)
8: P ← select |P| new parents from P using lexicase selection with ds as the cases
9: return P
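Read as code, a single generation of Algorithm 2 might be organized as in the sketch below. This is a schematic outline under our own naming; the distance-estimation, down-sampling, and lexicase-selection steps are passed in as callables (for instance, the functions sketched earlier with any evaluation callback already bound) rather than being the authors' actual implementations.

from typing import Callable, List, Sequence, Tuple

def ids_generation(population: Sequence, cases: Sequence, generation: int,
                   k: int, rho: float, r: float,
                   distance_matrix: List[List[int]],
                   estimate_distances: Callable,  # returns a case-by-case distance matrix
                   downsample: Callable,          # e.g. a farthest-first selector over that matrix
                   lexicase_select: Callable) -> Tuple[list, List[List[int]]]:
    """One generation of informed down-sampling with sparse information.
    Returns the selected parents and the (possibly updated) case distance matrix."""
    # Every k generations, re-estimate case distances from a small parent sample.
    if generation % k == 0:
        distance_matrix = estimate_distances(population, cases, rho)
    # A fresh down-sample is drawn every generation, even between distance updates.
    ds_indices = downsample(distance_matrix, r)
    ds = [cases[i] for i in ds_indices]
    # Lexicase selection picks |population| parents using only the down-sampled cases.
    parents = [lexicase_select(population, ds) for _ in range(len(population))]
    return parents, distance_matrix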
4 Experimental Methods

We conducted a series of experiments to study the performance of informed down-sampled lexicase selection. We compared the performance of informed down-sampled, random down-sampled, and standard lexicase selection on a series of program synthesis benchmark problems. We performed all experiments in two independent genetic programming systems to show that the findings are robust across different program representations: PushGP and Grammar Guided Genetic Programming (G3P). This section introduces the benchmark problems and genetic programming systems used in our experiments and describes our experimental design.

Table 1: Program synthesis benchmark problems selected from the first and second general program synthesis benchmark suites, along with their respective input and output types and multiplicities.

  Problem         Suite  Input Type         Output Type
  Count Odds      PSB1   Vector of Integer  Integer
  Find Pair       PSB2   Vector of Integer  Two Integers
  Fizz Buzz       PSB2   Integer            String
  Fuel Cost       PSB2   Vector of Integer  Integer
  GCD             PSB2   Two Integers       Integer
  Grade           PSB1   Five Integers      String
  Scrabble Score  PSB1   String             Integer
  Small or Large  PSB1   Integer            String

4.1 Program Synthesis Benchmark Problems

We evaluate each system using eight program synthesis benchmark problems from the first and second general program synthesis benchmark suites (Helmuth and Spector, 2015; Helmuth and Kelly, 2021). These problems are well studied and are commonly used to compare parent selection algorithms in a GP context (Sobania et al., 2022b,a). The two benchmark suites include a variety of introductory program synthesis problems that require the manipulation of multiple data types with complex looping or conditional structures. Each benchmark problem is defined by a set of input/output examples (referred to as cases) that specify the desired behavior of a correct program. For each problem, we split the input/output examples into a training set and a testing set. During evolution, we assessed program quality using only the training set.
We used the testing set to measure how well a program generalizes to examples unseen during evolution. We consider each input/output example on a pass/fail basis; that is, a program passes a test case if it produces the correct output when run with the associated input. A program is a solution if it passes all of the training cases; it generalizes if it passes all training and all testing cases. We refer to a run as a "success" if it results in the production of a generalizing solution. We used the same training and testing data sets across both PushGP and G3P for each problem to ensure that the available data does not bias performance.

Table 1 shows the eight program synthesis benchmark problems that we have chosen, along with their input and output types. We selected these particular problems to allow us to test informed down-sampling on a set of easy, medium, and hard problems, as established by published success rates using PushGP and random down-sampled lexicase selection (Helmuth and Spector, 2021; Helmuth and Kelly, 2022). We also ensured that these problems require qualitatively different programmatic paradigms to solve, such as looping and conditional execution (Helmuth and Kelly, 2022).

4.2 Genetic Programming Systems

PushGP is a system that evolves computer programs in the Push programming language, a stack-based language specifically invented for use in genetic programming (Spector and Robinson, 2002; Spector et al., 2004).
Push literals are pushed onto one of a set of data-type-specific stacks, while instructions are also stored on a stack during interpretation. These instructions usually act on data from the stacks and leave their return values on the stacks. Instructions take values from and return results to the appropriately typed stack, including from and to the instruction stack, allowing programs to use multiple data types and complex conditional execution paradigms. In this work, we used the propeller implementation of PushGP†.

Table 2: General and System-Specific Evolution Parameters

  General Parameter                     Value
  runs per problem                      100
  population size                       1,000
  size of training set                  200
  size of test set                      1,000
  program execution limit               60 million
  maximum number (base) of generations  300

  PushGP Parameter                      Value
  variation operator                    UMAD
  UMAD rate                             0.1

  G3P Parameter                         Value
  crossover operator                    subtree crossover
  crossover probability                 0.95
  mutation operator                     subtree mutation
  mutation steps                        1
  maximum tree depth                    17
  elite size                            5
  initialisation                        position-independent grow
  maximum initial tree depth            10
G3P uses a context-free grammar in Backus-Naur form to evolve individuals in a desired programming language and supports the use of different data types and control structures (Whigham et al., 1995; Forstenlechner et al., 2016, 2017). To prevent the generation of many invalid solutions during search, we use a tree-based representation instead of the genotype-phenotype mapping known from classical grammatical evolution (Ryan et al., 1998; Sobania and Rothlauf, 2020). For the implementation of G3P, our code‡ is based on the PonyGE2 framework (Fenton et al., 2017).

Table 2 shows the system-specific parameters for PushGP and G3P, as well as the general parameters used in both systems. The "runs per problem" parameter refers to the number of independent evolutionary runs conducted for each problem and experimental configuration. The PushGP system uses the uniform mutation by addition and deletion (UMAD) operator (Helmuth et al., 2018) with a mutation rate of 0.1. For G3P, we use subtree mutation and crossover, with a crossover probability of 0.95. The initialization for G3P is position-independent grow (Fagan et al., 2016).
We use grammars based on those provided by the PonyGE2 framework, with small adjustments to make them more directly comparable to the PushGP instruction set.

† https://github.com/ryanboldi/propeller/releases/tag/Informed-Downsampling
‡ https://gitlab.rlp.net/mbriesc/informed-down-sampled-lexicase-selection

4.3 Evaluation and Generation Limits

In order to make a fair comparison between methods that perform different numbers of program executions per generation, we follow the recommendation from the PSB2 benchmark suite and limit each GP run to 60 million program executions (Helmuth and Kelly, 2021). Since program executions typically account for the majority of the computational cost of a GP run, this ensures that runs receive similar amounts of computation regardless of whether they use down-sampling. In standard runs using all training cases, the 60 million executions are used by at most 300 generations of a population of 1000 individuals evaluated on 200 cases. With random down-sampling, we increase the maximum number of generations by the same factor as the down-sampling. For example, if one tenth of the training data is used in each sample, we can run evolution for ten times the number of generations while keeping the number of individual program executions constant. More generally, if we let G be the maximum number of generations for a run using all training cases, we allow our random down-sampling runs a limit of Ĝ generations, where Ĝ = G / r and r is the down-sample rate. For informed down-sampled lexicase selection, the generational limit is calculated as Ĝ = G / (r + ρ(1−r)/k), where ρ is the parent sampling rate and k is the parameter for the scheduled case distance computation.
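As a quick sanity check on these limits, the following lines evaluate both formulas for G = 300 and ρ = 0.01 at the down-sample rates and schedules used in this work; truncating to a whole number of generations is our assumption about how the reported limits were rounded.

G, rho = 300, 0.01
for r in (0.05, 0.1):
    print(f"random down-sampling, r={r}: G_hat = {round(G / r)}")
    for k in (1, 10, 100):
        g_hat = int(G / (r + rho * (1 - r) / k))   # truncate to a whole generation count
        print(f"informed down-sampling, r={r}, k={k}: G_hat = {g_hat}")
# Prints 6000 then 5042, 5888, 5988 for r = 0.05, and 3000 then 2752, 2973, 2997 for r = 0.1,
# matching the generational limits listed in Table 3.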
The exact generational limits for each experimental configuration are shown in Table 3.§

4.4 Experimental Configurations

We explore 11 different configurations of lexicase selection for each problem: standard lexicase selection (Lex), random down-sampled lexicase selection (Rnd), IDS lexicase selection with full information, and three sparse-information IDS configurations. To better match previous literature, all down-sampling methods were run with both r ∈ {0.05, 0.1}. Table 3 shows the configurations of the different runs performed in this work. Because of their different per-generation computational costs, these runs have different generational limits, as explained in Section 4.3.

Full-information down-sampling simply uses a parent sampling rate of 1, which means that the distances between training cases are determined by all parents' performance on every test case. With this, the quality of the distance metric between two cases is not limited by the parent sampling or the generational gaps we use to reduce computational load. Full-information down-sampling is included as a control experiment to compare with using all cases for selection in standard lexicase selection. It is important to note that these runs last for the same number of generations as regular lexicase selection, because we need to evaluate all parents on all test cases in order to determine the distances between the cases.
§ As our implementations evaluate the fitness of individuals in the parent sample twice, we run the IDS-with-sparse-information configurations for slightly (< 40) fewer generations to compensate for the additional computational effort.

Table 3: Settings used in our experiments for standard lexicase selection (Lex), random down-sampled lexicase selection (Rnd) and informed down-sampled lexicase selection (IDS). The variable r denotes the down-sampling rate, ρ is the parent sampling rate, k is the generational interval at which we update the distance matrix, and Ĝ specifies the maximum number of generations.
  Method   Lex   Rnd    IDS   IDS    IDS    IDS    Rnd    IDS   IDS    IDS    IDS
  r         -    0.05   0.05  0.05   0.05   0.05   0.1    0.1   0.1    0.1    0.1
  ρ         -     -     1     0.01   0.01   0.01    -     1     0.01   0.01   0.01
  k         -     -     1     1      10     100     -     1     1      10     100
  Ĝ        300   6000   300   5042   5888   5988   3000   300   2752   2973   2997

Finally, the six informed down-sampling methods with sparse information chosen for this work use, for both the 0.05 and 0.1 down-sample rates (r), a parent sampling rate (ρ) of 0.01 together with a few different distance calculation scheduling (k) parameters. Through a set of preliminary experiments, the value of ρ = 0.01 for the parent sampling rate was determined to be effective while not resulting in too many extra program executions¶. In conjunction, these hyper-parameters mean that every k generations, 10 parents are used to determine the distances between all training cases, where k ∈ {1, 10, 100}.

5 Results and Discussion

We discuss the success rates achieved by both GP systems using standard lexicase selection, random down-sampling, and different configurations of IDS. Further, we study how the composition of the down-samples found by IDS changes over the generations.

5.1 Informed Down-Sampling Improves Problem-Solving Success

Tables 4 and 5 show the success rates for PushGP and G3P, respectively, on the chosen program synthesis benchmark problems for the different parameter configurations. The success rate is defined as the number of runs that result in a program that passes the complete training set as well as the entire unseen test set. For random down-sampling and IDS, we measured solutions on only the down-samples during the actual run.
As such, we execute these runs to the maximum generational limit and then conduct a post-hoc analysis to see whether any solutions passed all of the training cases. If so, that solution is then evaluated on the unseen test set to determine whether it generalizes. For all studied configurations, we report success rates based on 100 runs.

For each benchmark problem, we highlight in bold the best success rate at each of the down-sample sizes. Problem names in bold are those where an informed down-sampling run outperformed random down-sampling at both down-sample rates on that problem. Problem names that are underlined are those where a random down-sampling run outperformed informed down-sampling at both down-sample rates. Asterisks signify results that are significantly better than random down-sampling at the same down-sample size.

¶ As we are trying to approach the computational savings of random down-sampled lexicase selection, the smaller the value of ρ, the better. We found that the relatively small value of ρ = 0.01 resulted in sampling that was good enough to determine the joint case information.

Standard lexicase selection was not included in our statistical analyses, as IDS is presented as an improvement over random down-sampling at a fixed down-sample size. We performed significance analysis with a two-proportion z-test and Bonferroni-Holm correction. Results marked with * are significant at the α = 0.1 level, ** at the α = 0.05 level, and *** at the α = 0.01 level.
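For reference, the comparison behind these asterisks can be reproduced with a plain two-proportion z-test. The sketch below is our own illustration using the Fizz Buzz counts reported later (95 successes for IDS versus 64 for random down-sampling, out of 100 runs each); it uses a pooled standard error and omits the Bonferroni-Holm correction that the reported analysis applies across comparisons.

from math import erfc, sqrt

def two_proportion_ztest(success_a: int, n_a: int, success_b: int, n_b: int) -> float:
    """Two-sided p-value for H0: the two success proportions are equal."""
    p_a, p_b = success_a / n_a, success_b / n_b
    pooled = (success_a + success_b) / (n_a + n_b)
    se = sqrt(pooled * (1 - pooled) * (1 / n_a + 1 / n_b))
    z = (p_a - p_b) / se
    return erfc(abs(z) / sqrt(2))   # equals 2 * (1 - Phi(|z|))

# Fizz Buzz at r = 0.05: IDS (rho = 0.01, k = 100) vs random down-sampling, 100 runs each.
print(two_proportion_ztest(95, 100, 64, 100))  # well below 0.01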
For the PushGP results, let us consider the Fizz Buzz problem. Standard lexicase selection had 13 successful runs. Using random down-sampling at the 0.05 down-sampling rate improved this result to 64, in line with the findings of Helmuth and Spector (2021). Using the same down-sampling rate with IDS, a 0.01 parent sampling rate, and k = 100 yielded 95 successful runs, which is significantly better than random down-sampling at the 0.01 level. This is an important result, as IDS significantly improves on random down-sampling, which in turn improves on standard lexicase selection.

Another set of PushGP IDS runs where we observed significant improvements were those of the Count Odds problem. While standard lexicase selection achieves 24 successes, random down-sampling at either down-sample rate (r = 0.05 or r = 0.1) does not produce more than 26 successful runs. The failure of random down-sampling to meaningfully improve success rates appears to be addressed by informed down-sampling, as informed down-sampling at all configurations ensures that close to, if not all, 100 runs successfully generalize to the held-out test set.
This and similar results hint that, while randomly down-sampled lexicase selection usually works well, there are some problems where important cases may be dropped, resulting in performance similar to standard lexicase selection despite the increased number of search generations. Informed down-sampling has the ability to improve success rates both when random down-sampling improves upon standard lexicase selection and when it does not.

Only one configuration of G3P resulted in a significant improvement over random down-sampling at the same down-sample rate. For the Grade problem at the 0.05 down-sample rate, we see significantly more successes when using IDS with ρ = 0.01 and k = 10: this informed down-sampling configuration resulted in 57% of the runs yielding a generalizing solution, whereas random down-sampling resulted in only 39% of the runs yielding a success. The fact that only a single configuration of IDS resulted in a significant improvement suggests that the problem-solving benefits of using IDS are representation- and problem-dependent, motivating future work to continue improving IDS to achieve more universal improvements in problem-solving success.

We have a number of hypotheses explaining this improved performance. The first is that the informed down-sampling procedure increases the number of specialists (individuals that are exceptional on a few cases but have a high total error) that survive over the course of evolutionary time. These individuals could be better maintained with IDS because the cases they are exceptional on are still placed in the down-samples throughout evolution, preventing them from being lost as can happen when randomly down-sampling.

Another hypothesis for IDS's improved performance is that it reduces the computation used to evaluate individuals on synonymous cases.
When two cases are fully synonymous, all individuals that solve one case solve the other as well. When using lexicase selection, having both of these cases in the down-sample would result in little difference in the probability of selecting each individual compared to having only one case in the down-sample. After one of the two cases has been used to filter the pool of candidate solutions, the other will have no filtering pressure, because all remaining individuals perform identically on the synonymous cases. Having a synonymous case does increase the chance that one of the two cases appears earlier in the shuffled case ordering, producing a minor (though perhaps undesired) change in selection probability. Synonymous (or near-synonymous) cases additionally take spots in the down-sample that cannot be allocated to other, more informative cases. When using IDS, we ensure that the first few cases added to the down-sample measure relatively different behaviors. This may allow IDS to select a larger variety of individuals than random down-sampling, instead approximating the variety that could be selected by full lexicase selection.
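To make the filtering argument above concrete, the following is a minimal Python sketch of lexicase selection; it is not the authors' implementation, and `pop`, `error_fn`, and `cases` are illustrative names. A case that is fully synonymous with one already applied leaves the candidate pool unchanged, which is exactly the wasted evaluation effort discussed here.

```python
import random

def lexicase_select(pop, error_fn, cases):
    """Select one parent via lexicase selection over the given cases."""
    candidates = list(pop)
    for case in random.sample(cases, len(cases)):  # random case ordering
        best = min(error_fn(ind, case) for ind in candidates)
        candidates = [ind for ind in candidates if error_fn(ind, case) == best]
        if len(candidates) == 1:
            break
        # A case that is fully synonymous with one already applied removes
        # nobody here: every remaining candidate has the same error on it.
    return random.choice(candidates)
```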
Table 4: Number of generalizing solutions (successes) out of 100 runs achieved by PushGP on the test set.

Method           Lex  |  Rnd    IDS    IDS    IDS    IDS   |  Rnd    IDS    IDS    IDS    IDS
r                     |  0.05   0.05   0.05   0.05   0.05  |  0.1    0.1    0.1    0.1    0.1
ρ                     |         1      0.01   0.01   0.01  |         1      0.01   0.01   0.01
k                     |         1      1      10     100   |         1      1      10     100
Count Odds       24   |  25     43***  99***  100*** 98*** |  26     55***  95***  99***  97***
Find Pair         5   |  27     9      32     32     36    |  15     7      19     19     21
Fizz Buzz        13   |  64     2      85***  94***  95*** |  45     3      75     78*    81**
Fuel Cost        41   |  72     1      83     85     83    |  76     7      69     72     70
GCD              20   |  74     4      76     67     69    |  54     6      56     63     62
Grade             0   |  0      0      0      1      0     |  1      0      0      1      1
Scrabble Score    8   |  8      6      69***  64***  75*** |  16     9      55***  74***  64***
Small or Large   34   |  93     37     69     69     69    |  69     39     60     66     54

Table 5: Number of generalizing solutions (successes) out of 100 runs achieved by G3P on the test set.

Method           Lex  |  Rnd    IDS    IDS    IDS    IDS   |  Rnd    IDS    IDS    IDS    IDS
r                     |  0.05   0.05   0.05   0.05   0.05  |  0.1    0.1    0.1    0.1    0.1
ρ                     |         1      0.01   0.01   0.01  |         1      0.01   0.01   0.01
k                     |         1      1      10     100   |         1      1      10     100
Count Odds       65   |  66     45     53     62     63    |  67     58     60     58     72
Find Pair         0   |  0      0      1      0      0     |  1      0      0      1      0
Fizz Buzz        62   |  83     50     84     78     85    |  78     53     81     89     72
Fuel Cost        33   |  34     17     28     27     29    |  29     21     21     25     33
GCD               0   |  1      0      0      0      1     |  0      0      0      0      0
Grade            36   |  39     29     51     57*    44    |  44     37     46     51     48
Scrabble Score    6   |  10     1      11     10     10    |  14     0      6      3      3
Small or Large   41   |  52     49     54     63     63    |  59     52     57     55     63
These results, in general, make it clear that informed down-sampling by farthest first traversal significantly outperforms randomly down-sampled lexicase selection on a portion of these program synthesis benchmark problems for the PushGP evolutionary framework. The G3P results are less clearly in favor of informed down-sampling, but still point to minor improvements in success rates. It is important to note that all of our down-sampled runs (besides full-information) consistently and significantly outperform standard lexicase selection, which has in turn been shown to significantly outperform other selection strategies. This result agrees with that of Helmuth and Abdelhady (2020), showing that down-sampled lexicase selection was, before this work, the state of the art in program synthesis with genetic programming. Our informed down-sampling runs outperform random down-sampling (higher success rate for both down-sample rates) on 6/8 of the problems we studied for PushGP, with 3/8 of them being statistically significant.
For G3P, informed down-sampling improves on 3/8 problems, with 1/8 being significant. Random down-sampling outperformed informed down-sampling (across both down-sampling levels) on only one problem (Small or Large) for PushGP, and none for G3P. For Small or Large with PushGP, we see that the worse performance with informed down-sampling can be attributed to a lower generalization rate (and not to worse performance on the training sets). The generalization rates can be found in Appendix Figure 6 for PushGP and Appendix Figure 7 for G3P. Future work should explore the effect that informed down-sampling has on generalization in more depth.

5.2 Using Smaller Informed Down-Samples Tends to Improve Success Rates

In general, our IDS runs at a 0.05 down-sample rate have a higher success rate than their equivalent counterparts at the 0.1 down-sample rate. This difference is likely due to the fact that the runs at a 0.1 down-sample rate have a substantially lower generational limit, meaning that we are exploring a smaller portion of the space of possible solution programs. With 200 training cases, our down-sample contains 10 and 20 cases, respectively, for the 0.05 and 0.1 down-sample rates. A possible reason for the improved performance at 0.05 is that a larger proportion of these cases are indeed our distinct, or informative, cases. Note that once the Farthest First Traversal process selects a representative case for every synonymous group in the down-sample, every remaining case's minimum distance to the current sample is equal to 0, so the remaining slots are filled by random selection. Since we are using the same problems, with the same number of behavioral niches, the runs with 20 cases in the down-sample will include more synonymous cases. Because these additional cases are not informative enough to make up for the decreased generational limit, we see a lower success rate. We will analyze the specific cases that compose the down-samples in Section 5.3.
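As a rough illustration of how such a down-sample could be assembled by farthest-first traversal, consider the sketch below. It is a plausible reading of the procedure discussed in this section rather than the authors' exact implementation: `dist` is assumed to be a symmetric lookup of estimated pairwise case distances, and `sample_size` equals the down-sample rate times the number of training cases. It also reproduces the behavior noted above: once every remaining case is at distance 0 from the current sample, the remaining slots are filled at random.

```python
import random

def build_down_sample(cases, dist, sample_size):
    """Construct a down-sample of training cases by farthest-first traversal.

    cases       : list of training-case identifiers
    dist        : dict mapping (case_a, case_b) -> estimated distance (symmetric)
    sample_size : number of cases to keep (down-sample rate * len(cases))
    """
    remaining = list(cases)
    sample = [remaining.pop(random.randrange(len(remaining)))]  # random seed case
    while len(sample) < sample_size and remaining:
        # Distance of each remaining case to the sample = min distance to any member.
        min_d = {c: min(dist[(c, s)] for s in sample) for c in remaining}
        if max(min_d.values()) == 0:
            # Every behavioral niche already has a representative in the sample;
            # fill the rest of the slots at random.
            random.shuffle(remaining)
            sample.extend(remaining[: sample_size - len(sample)])
            break
        # Otherwise add the case farthest from the current sample.
        choice = max(remaining, key=lambda c: min_d[c])
        sample.append(choice)
        remaining.remove(choice)
    return sample
```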
The exceptions to this trend are the full-information down-sampling runs. For these runs, the larger down-samples tend to perform better. This result is likely due to the fact that the generational limit was set to 300 for both sampling levels (as they both evaluate all individuals on all test cases), so having a smaller down-sample does not change the number of evaluations. With more cases in the sample, the GP method can take more information into account when performing selection, which could result in a more informed search. The magnitude of the differences in success rate across sample sizes for the full-information IDS runs suggests that there are diminishing returns for including more cases in the sample.

5.3 Informed Down-Sampling Automatically Discovers Important Training Cases

To gain a deeper insight into how IDS composes down-samples, we visualize how the selected training cases (used for a down-sample) develop over the generations of an evolutionary run.
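Such a composition plot can be reproduced from logged down-samples with a short script along the following lines. This is an illustrative sketch only: the `logs` structure (for each run, the case indices of each generation's down-sample) is a hypothetical logging format, not something defined in the paper.

```python
import numpy as np
import matplotlib.pyplot as plt

def plot_composition(logs, n_cases, n_generations):
    """Plot how often each training case appears in the down-sample over time.

    logs: list of runs; each run is a list (one entry per generation it was
          active) of the case indices included in that generation's down-sample.
    """
    counts = np.zeros((n_cases, n_generations))
    active = np.zeros(n_generations)
    for run in logs:
        for g, sample in enumerate(run[:n_generations]):
            counts[list(sample), g] += 1
            active[g] += 1
    freq = counts / np.maximum(active, 1)  # average over runs still active
    plt.imshow(freq, aspect="auto", origin="upper", cmap="viridis")
    plt.xlabel("Generations")
    plt.ylabel("Cases (training-set order)")
    plt.colorbar(label="Inclusion frequency")
    plt.show()
```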
Figures 3 and 4 show the composition of down-samples for every problem at every generation using PushGP (Fig. 3) and G3P (Fig. 4) with down-sample rate r = 0.05. We present results for a full information configuration (ρ = 1 and k = 1) as well as a sparse information configuration (ρ = 0.01 and k = 10). We chose to analyze both a full information and a sparse information run in order to see whether our sparse information configurations find the same training cases to be informative as if we had used all parents to evaluate the distances between training cases. The plots show how often certain training cases are included in the down-sample at every generation, averaged over all active runs. Each row represents a case in the training data, ordered by its position in the training set. The training sets were generated by first adding some human-expert-defined edge cases and then filling the rest with cases randomly generated by a function that already implements the desired program (an oracle function). For each figure, there is a single marker on the y-axis that shows exactly where the expert-case cutoff for the training set lies. Thus, the rows above the marker represent cases that humans determined to be important based on the problem definition. Brighter colors imply that a case is included more often; darker colors imply a lower number of inclusions.

For PushGP (Figure 3), we see that the configurations with sparse information often include the same cases in the down-sample as the runs with full information.
This result means that by using a parent sampling rate of ρ = 0.01 and a case distance evaluation schedule parameter of k = 10, we can significantly reduce the number of evaluations needed to calculate distances between cases, while still maintaining a good approximation to the ground truth (full information, where we use all parents every generation to calculate distances). However, the compositions for our sparse information runs are slightly noisier than those for full information, suggesting that parent sampling could introduce some extra stochasticity into the down-sample creation process.

For all studied benchmark problems, we see that IDS has a strong bias toward specific training cases, which are included substantially more often in the down-sample. These selected training cases are largely consistent with the human-defined edge cases that sit at the beginning of the training set. This result shows that informed down-sampling often finds the same cases to be informative as a human expert would, without any knowledge of the problem definition. However, with IDS we can draw further comparisons of informativeness within this expert-defined group of cases. This can be seen in that some cases are selected more often than others within the first several cases.
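A sketch of how these case distances could be estimated from a sampled subset of parents is shown below. It is an illustration consistent with the discussion here, treating the distance between two cases as the disagreement between their pass/fail vectors over the sampled parents; the names and the exact metric are assumptions rather than the authors' code, and case identifiers are assumed to be hashable.

```python
import random

def estimate_case_distances(parents, error_fn, cases, rho=0.01):
    """Estimate pairwise case distances from a sampled subset of parents.

    Each sampled parent contributes one pass/fail bit per case; the distance
    between two cases is the fraction of sampled parents whose pass/fail
    status differs between them, so fully synonymous cases get distance 0.
    rho is the parent sampling rate. In IDS these estimates would only be
    refreshed every k generations (the evaluation schedule).
    """
    n = max(1, int(rho * len(parents)))
    sampled = random.sample(parents, n)
    solved = {c: [error_fn(p, c) == 0 for p in sampled] for c in cases}
    dist = {}
    for i, a in enumerate(cases):
        for b in cases[i + 1:]:
            d = sum(x != y for x, y in zip(solved[a], solved[b])) / n
            dist[(a, b)] = dist[(b, a)] = d
    return dist
```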
Figure 3: Down-sample composition over generations for PushGP with 0.05 down-sample rate for a full information (ρ = 1 and k = 1) and a sparse information configuration (ρ = 0.01 and k = 10). [Heatmap panels of Cases vs. Generations, under Full Information and Sparse Information, for Count Odds, Find Pair, Fizz Buzz, Fuel Cost, GCD, Grade, Scrabble Score, and Small or Large.]

We then look at the labels of the specific training cases that are found to be important. We see that it makes sense for these training cases to be included more often than others in the down-samples. Note that the labels of the specific training cases are not included in the plots for simplicity, but they can be queried by their index in the data sets provided in our code implementation. For example, for the Small or Large problem, cases around the decision boundaries as well as numbers between 0 and 1000 are included more often. For the Grade problem, the edge cases with very close decision boundaries are included, while the ones with far-away boundaries are not taken into account for the down-sample.
For Fuel Cost, nearly all of the human-defined edge cases are found to be important, while for the GCD problem the first two cases in particular make it into nearly every down-sample, whereas the rest are selected less often.

Figure 4: Down-sample composition over generations for G3P with 0.05 down-sample rate for a full information (ρ = 1 and k = 1) and a sparse information configuration (ρ = 0.01 and k = 10). [Heatmap panels of Cases vs. Generations, under Full Information and Sparse Information, for the same eight problems.]

For the Scrabble Score problem, we see that the first edge cases, which specify the score for each letter, do not seem to be informative at all. This result is not surprising, as this information is already available to PushGP through a vector with these scores on the vector stack. However, the three edge cases after them, with empty strings and special characters as input, are included a lot. For Count Odds, the edge cases denoting empty lists, or lists with zero or a single odd number, were found to be important, indicating that those contain all the information needed to learn what odd and even numbers are as well as how to handle a list.
For Fizz Buzz, all edge cases seem important, while for the Find Pair problem only the edge cases with lists of length 3 are consistently included. The edge cases with lists of length 2 are represented in the down-sample less often. Lastly, we see that the composition of the down-sample stays rather stable during the evolutionary run for the PushGP system, explaining why there is only a small difference in our experiments between calculating the distances every k = 1 and every k = 100 generations (see Table 4).

For G3P (Fig. 4), we see results similar to those with PushGP. However, for the problems that require iterative structures to be solved (Count Odds, Find Pair), the down-sample quickly dissolves into random noise instead of any form of structure. This dynamic occurs despite the fact that the same edge cases as with PushGP are initially identified in the first few generations. This result is not surprising, as finding iterative structures is known to be challenging for grammar-guided approaches, because such structures are difficult to build step by step guided by the performance on a set of training cases (Sobania and Rothlauf, 2020; Sobania et al., 2022b). Another difference between the case compositions is that, while IDS with G3P tends to discover the same cases as those found with PushGP, their use is less consistent, resulting in lines that are fainter than those for PushGP.
Both of these hypotheses could help explain the relatively smaller improvement that IDS yields for G3P than for PushGP. However, for the problems that require conditionals, like Small or Large and Grade, we see that the important cases are identified and used during evolution. This result is also reflected in the success rates compared to random down-sampling (see Table 5). Interestingly, IDS identifies many of the same cases as important for G3P as for PushGP. This result suggests that the structure of the problem itself, rather than the considered representation, determines which cases are important. This makes IDS potentially useful across many different systems and approaches.

6 Conclusion and Future Work

In this work, we proposed a novel approach to constructing down-samples in an informed manner during evolution when using down-sampled lexicase selection. We find that changing the composition of down-samples to include cases that are more "informative" helps improve problem-solving performance under a fixed computational budget. Informativeness, we hypothesize, is linked to how distinct the cases in the down-sample are. Cases that are solved by the same subset of the population are likely testing for the same behavior, and thus need not be included in the down-sample at the same time. Cases that test for different behaviors likely maintain different behavioral groups of individuals, which could promote and maintain higher levels of diversity in the population.

In our empirical comparisons of these down-sampling methods, we find evidence to support the conclusion that selecting cases in an informed manner increases the success rate of GP runs. These results were confirmed across two independent GP systems using well-studied benchmark problems.
We find that using IDS often increases the proportion of informative cases in the down-sample, as verified by improved success rates as well as by directly inspecting the contents of the down-samples. IDS improves upon the state-of-the-art selection method across the majority of the program synthesis problems explored in this work.

This work is a first exploration into changing the case composition of down-samples for lexicase selection runs. As such, it opens many potential directions for future research. Due to the modular nature of the informed down-sampling system, different methods could be used for either the pairwise information measurement or the down-sample creation portions of the algorithm. An exploration into different down-sampling levels, and the effects these levels have on the informational content of down-samples, is also a promising direction for future work. Additionally, IDS introduces new hyperparameters for the parent sampling rate and the generational schedule; it would be beneficial to create a method for automatically setting these depending on the problem and the state of the GP search. Even though there are reasons to believe that IDS and down-sampling in general work well with lexicase selection, there is nothing that ties them to a particular selection method; it may be informative to explore the effects of IDS on other parent selection methods such as tournament selection. Finally, comparing the extent to which different down-sampling strategies blunt lexicase's ability to maintain specialists could also yield important insights into why informed down-sampling improves success rates as much as it does.

7 Acknowledgements
This material is based upon work supported by the National Science Foundation under Grant No. 1617087. Any opinions, findings, and conclusions or recommendations expressed in this publication are those of the authors and do not necessarily reflect the views of the National Science Foundation.

This work was performed in part using high performance computing equipment obtained under a grant from the Collaborative R&D Fund managed by the Massachusetts Technology Collaborative.

Parts of this research were conducted using the supercomputer Mogon and/or advisory services offered by Johannes Gutenberg University Mainz (hpc.uni-mainz.de), which is a member of the AHRP (Alliance for High Performance Computing in Rhineland-Palatinate, www.ahrp.info) and the Gauss Alliance e.V.

The authors would like to thank Anil Saini, Austin Ferguson, Cooper Sigrist, Constantin Weiser, Edward Pantridge, Jose Hernandez, Li Ding, and the members of the PUSH lab at Amherst College for discussions that helped shape this work.

References

Aenugu, S. and Spector, L. (2019). Lexicase selection in learning classifier systems. In Proceedings of the Genetic and Evolutionary Computation Conference, GECCO '19, pages 356–364, New York, NY, USA. Association for Computing Machinery.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Lucic, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Krause, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Practical coreset constructions for machine learn- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' arXiv: Machine Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Boldi, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Exploring Environmental Change for Down- Sampled Lexicase Selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' volume Why it Didn’t Work-Shop of ALIFE 2022: The 2022 Con- ference on Artificial Life.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Brindle, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (1980).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Genetic algorithms for function optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' PhD thesis, University of Alberta.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Chrysakis, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Moens, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content='-F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Online continual learning from imbalanced data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In III, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Singh, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', editors, Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 1952–1961.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' PMLR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Deb, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Pratap, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Agarwal, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Meyarivan, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2002).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' A fast and elitist multiobjective genetic algorithm: NSGA-II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' IEEE Transactions on Evolutionary Computation, 6(2):182–197.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Preprint 23 R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Boldi, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Briesch, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Sobania, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Lalejini, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Rothlauf, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ofria and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Spector Ding, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Boldi, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Spector, L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Lexicase selection at scale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Genetic and Evolutionary Computation Conference Companion (GECCO ’22 Companion), July 9–13, 2022, Boston, MA, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ding, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Optimizing neural networks with gradient lexicase selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In International Conference on Learning Representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Dolson, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Ofria, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ecological theory provides insights about evolutionary compu- tation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Proceedings of the Genetic and Evolutionary Computation Conference Companion, GECCO ’18, page 105–106, New York, NY, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Fagan, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Fenton, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and O’Neill, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Exploring position independent initialisation in grammatical evolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In 2016 IEEE Congress on Evolutionary Computation (CEC), pages 5060– 5067.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Fenton, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', McDermott, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Fagan, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Forstenlechner, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Hemberg, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and O’Neill, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ponyge2: Grammatical evolution in python.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Proceedings of the Genetic and Evolutionary Com- putation Conference Companion, pages 1194–1201.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ferguson, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Hernandez, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Junghans, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Lalejini, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Dolson, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Ofria, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Char- acterizing the effects of random subsampling on lexicase selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Banzhaf, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Goodman, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Sheneman, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Trujillo, L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Worzel, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', editors, Genetic Programming Theory and Practice XVII, pages 1–23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer International Publishing, Cham.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Forstenlechner, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Fagan, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Nicolau, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and O’Neill, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' A grammar design pattern for arbitrary program synthesis problems in genetic programming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In European Conference on Genetic Programming, pages 262–277.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Forstenlechner, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Nicolau, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Fagan, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and O’Neill, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Grammar design for derivation tree based genetic programming systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In European Conference on Genetic Programming, pages 199–214.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Goings, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Goldsby, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Cheng, B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Ofria, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2012).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' An ecology-based evolutionary algorithm to evolve solutions to complex problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Artificial Life 13, pages 171–177.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' MIT Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Abdelhady, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Benchmarking parent selection for program synthesis by genetic programming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Proceedings of the 2020 Genetic and Evolutionary Computation Conference Companion, pages 237–238, Canc´un Mexico.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' ACM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Kelly, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' PSB2: The second program synthesis benchmark suite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In 2021 Genetic and Evolutionary Computation Conference, GECCO ’21, Lille, France.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' ACM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Kelly, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2022).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Applying genetic programming to psb2: The next gen- eration program synthesis benchmark suite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Genetic Programming and Evolvable Machines, 23(3):375–404.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', McPhee, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Effects of lexicase and tournament selection on diversity recovery and maintenance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Proceedings of the 2016 on Genetic and Evolution- ary Computation Conference Companion, GECCO ’16 Companion, page 983–990, New York, NY, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', McPhee, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Program synthesis using uniform mutation by addition and deletion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Proceedings of the Genetic and Evolutionary Computation Conference, GECCO ’18, page 1127–1134, New York, NY, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Pantridge, E.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' On the importance of specialists for lexicase selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Genetic Programming and Evolvable Machines, 21(3):349–373.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' General program synthesis benchmark suite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In GECCO ’15: Proceedings of the 2015 conference on Genetic and Evolutionary Computation Conference, pages 1039–1046, Madrid, Spain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' ACM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' 24 Preprint Informed Down-Sampled Lexicase Selection Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Explaining and exploiting the advantages of down-sampled lexicase selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Artificial Life Conference Proceedings, pages 341–349.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' MIT Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Problem-solving benefits of down-sampled lexicase selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Artificial Life, pages 1–21.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Matheson, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Solving uncompromising problems with lexi- case selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' IEEE Transactions on Evolutionary Computation, 19(5):630–643.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Hernandez, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Lalejini, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Dolson, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Ofria, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Random subsampling improves performance in lexicase selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In GECCO ’19: Proceedings of the Genetic and Evolutionary Computation Conference Companion, pages 2028–2031, Prague, Czech Republic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' ACM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Hernandez, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Lalejini, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Ofria, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' An Exploration of Exploration: Measuring the Ability of Lexicase Selection to Find Obscure Pathways to Optimality.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Banzhaf, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Trujillo, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Winkler, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Worzel, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', editors, Genetic Programming Theory and Practice XVIII, pages 83–107.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer Nature Singapore, Singapore.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Hochbaum, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Shmoys, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (1985).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' A best possible heuristic for the k-center problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Oper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', 10:180–184.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Holland, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (1992).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Adaptation in Natural and Artificial Systems: An Introductory Analysis with Applications to Biology, Control and Artificial Intelligence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' MIT Press, Cambridge, MA, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Horn, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Nafpliotis, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Goldberg, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (1994).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' A niched Pareto genetic algorithm for multi- objective optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Proceedings of the First IEEE Conference on Evolutionary Computation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' IEEE World Congress on Computational Intelligence, pages 82–87, Orlando, FL, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' IEEE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Krawiec, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Swan, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and O’Reilly, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content='-M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Behavioral Program Synthesis: Insights and Prospects, pages 169–183.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer International Publishing, Cham.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' La Cava, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Danai, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Epsilon-lexicase selection for regression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Pro- ceedings of the Genetic and Evolutionary Computation Conference 2016, GECCO ’16, page 741–748, New York, NY, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Association for Computing Machinery.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Lalejini, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Dolson, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Vostinar, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Zaman, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Artificial selection methods from evolutionary computing show promise for directed evolution of microbes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' eLife, 11:e79665.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Loshchilov, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Hutter, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Online batch selection for faster training of neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' ArXiv, abs/1511.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content='06343.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Metevier, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Saini, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Lexicase selection beyond genetic programming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Banzhaf, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Spector, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Sheneman, L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', editors, Genetic Programming Theory and Practice XVI, pages 123–136.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer International Publishing, Cham.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Moore, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Stanton, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Lexicase selection outperforms previous strategies for in- cremental evolution of virtual creature controllers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Knibbe, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Beslon, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Parsons, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Misevic, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Rouzaud-Cornabas, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Bred`eche, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Hassas, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', 0001, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Soula, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', ed- itors, Proceedings of the Fourteenth European Conference Artificial Life, ECAL 2017, Lyon, France, September 4-8, 2017, pages 290–297.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' MIT Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Paul, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Ganguli, S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Dziugaite, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Deep learning on a data diet: Finding impor- tant examples early in training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Advances in Neural Information Processing Systems, 34:20596– 20607.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ruder, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' An overview of gradient descent optimization algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' arXiv:1609.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content='04747 [cs].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ryan, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Collins, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Neill, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (1998).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Grammatical evolution: Evolving programs for an arbitrary language.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In European conference on genetic programming, pages 83–96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Schmidt, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Lipson, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2005).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Co-evolution of fitness maximizers and fitness predictors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Rothlauf, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', editor, Late breaking paper at Genetic and Evolutionary Computation Conference (GECCO’2005), Washington, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content='C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Preprint 25 R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Boldi, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Briesch, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Sobania, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Lalejini, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Helmuth, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Rothlauf, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Ofria and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Spector Schmidt, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Lipson, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2008).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Coevolution of fitness predictors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' IEEE Transactions on Evolutionary Computation, 12:736–749.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Smith, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Forrest, S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Perelson, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (1993).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Population diversity in an immune system model: Implications for genetic search.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In WHITLEY, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', editor, Foundations of Genetic Algo- rithms, volume 2 of Foundations of Genetic Algorithms, pages 153–165.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Elsevier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Sobania, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', Briesch, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=', and Rothlauf, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2022a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Choose your programming copilot: a com- parison of the program synthesis performance of github copilot and genetic programming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In Proceedings of the Genetic and Evolutionary Computation Conference, pages 1019–1027.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Sobania, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' and Rothlauf, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Challenges of program synthesis with grammatical evolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' In European Conference on Genetic Programming (Part of EvoStar), pages 211–227.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/AtAzT4oBgHgl3EQfhv1C/content/2301.01488v1.pdf'} +page_content=' Sobania, D.' 
A Generalization Rates
Table 6: Generalization rate for PushGP. These data indicate the proportion of the runs that passed the training set that also passed the held out test set. (Rows: Count Odds, Find Pair, Fizz Buzz, Fuel Cost, GCD, Grade, Scrabble Score, Small or Large; columns: Lex, Rnd and IDS at down-sample rates r = 0.05 and r = 0.1, with parameters rho in {1, 0.01} and k in {1, 10, 100}.)
Table 7: Generalization rate for G3P. These data indicate the proportion of the runs that passed the training set that also passed the held out test set. (Same rows and columns as Table 6.)
diff --git a/C9E4T4oBgHgl3EQfFwy_/vector_store/index.pkl b/C9E4T4oBgHgl3EQfFwy_/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..c4eb48927b6a7898511f13126cd665d56471a2bc
--- /dev/null
+++ b/C9E4T4oBgHgl3EQfFwy_/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:793cd509b59ddeeab47be342607d90d26ed2afeacd9c10d5fbc0945581a0c471
+size 129855
diff --git a/CtE0T4oBgHgl3EQfQQAs/content/2301.02189v1.pdf b/CtE0T4oBgHgl3EQfQQAs/content/2301.02189v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..252a62b76434c34b2acbadafbaf543e29e1bdc6f
--- /dev/null
+++ b/CtE0T4oBgHgl3EQfQQAs/content/2301.02189v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb54594044be1479cd21fcecb615347004f6880d38076ab335d1986ec0daa478
+size 6212423
diff --git a/CtE0T4oBgHgl3EQfQQAs/vector_store/index.faiss b/CtE0T4oBgHgl3EQfQQAs/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..a9c03a9b6dcf3778a481e38038ab103bd96dfdf3
--- /dev/null
+++ b/CtE0T4oBgHgl3EQfQQAs/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f160ec6ed32e013d0d8fcd69127b9576bb23a4f041f1be7ff47842e49ffaf282
+size 13500461
diff --git a/DNE4T4oBgHgl3EQfGAw7/content/tmp_files/2301.04890v1.pdf.txt b/DNE4T4oBgHgl3EQfGAw7/content/tmp_files/2301.04890v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ef7cfa73f72e7c691336a20e0f9a18930d0561b9
--- /dev/null
+++ 
b/DNE4T4oBgHgl3EQfGAw7/content/tmp_files/2301.04890v1.pdf.txt @@ -0,0 +1,3465 @@
arXiv:2301.04890v1 [math.PR] 12 Jan 2023

BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS

VINCENT BANSAYE AND MICHELE SALVI

Abstract. Consider a graph where the sites are distributed in space according to a Poisson point process on $\mathbb{R}^n$. We study a population evolving on this network, with individuals jumping between sites with a rate which decreases exponentially in the distance. Individuals also give birth (infection) and die (recovery) at constant rate on each site. First, we construct the process, showing that it is well-posed even when starting from unbounded initial conditions. Secondly, we prove hydrodynamic limits in a diffusive scaling. The limiting process follows a deterministic reaction-diffusion equation. We use stochastic homogenization to characterize its diffusion coefficient as the solution of a variational principle. The proof involves in particular the extension of a classic Kipnis–Varadhan estimate to cope with the non-reversibility of the process, due to births and deaths. This work is motivated by the approximation of epidemics on large networks, and the results are extended to more complex graphs, including percolation of edges.

Key words: Epidemics, branching process, random graphs, stochastic homogenization.
MSC 2020: 92D25, 05C81, 35B27.

1. Introduction and main results

Consider a graph $G$ whose vertices $V$ are placed according to a Poisson point process on $\mathbb{R}^n$ with $n \ge 2$ and with edge set $E$ drawn from some distribution. Attach to each unoriented edge $\{x, y\} \in E$ a rate $r(x, y) = r(y, x) = e^{-\|x-y\|}$, where $\|\cdot\|$ indicates the Euclidean distance. Consider individuals that perform independent random walks on $G$ with jump rates $r(x, y)$. They also give birth to new individuals at rate $b \ge 0$ and die at rate $d \ge 0$. The main goal of the present work is to describe the limiting behaviour of this particle system under a diffusive rescaling.

The motivation for studying this kind of process comes from the analysis of real-world networks with agents moving on spatially inhomogeneous structures. Metapopulation models (or metacommunity models, for several species) aim at describing the habitat of a population as a collection of patches. Exchanges between two patches can depend on several features, in particular the distance. Since the pioneering works of Levins [Levins(1969)], metapopulations have a long history in biology and ecology. Issues include the conservation of species (see e.g. [Bascompte and Sole(1996), Bansaye and Lambert(2013)]), the evolution of dispersion (see e.g. [Hastings(1983)]), the impact of fragmentation of habitats (see e.g. [Hiebeler(2000)]) and the effect of heterogeneity of habitats (see e.g. [Pulliam(1988)]). While for the sake of simplicity one would consider a small number of patches, applications often call for the study of large metapopulations. As far as we know from the literature, large metapopulations are treated either in a mean-field approximation (see [Levins(1969)]), or with a spatially explicit large structure using cellular automata and simulations ([Bascompte and Sole(1996)]), or in a periodic environment. Random networks provide a relevant mathematical framework to analyze models which do not fall in the mean-field approximation and whose parameter complexity would explode if they were treated as large explicit graphs.
Rigorous works which combine motion and demography (birth, death, infections, ...) on large random landscapes are rare for now. Our interest is in understanding how an epidemic would spread on such structures. As a driving example, one can consider the spread of an infection among cattle on the French network of farms, see [Qi et al.(2019)Qi, Beaunée, Arnoux, Dutta, Joly, Vergu, and Ezanno]. In this first work we identify the diffusive behaviour of an epidemic in its first stages, which corresponds to the classic branching process approximation for a small ratio of infected individuals. In this case $b$ represents the contamination rate and $d$ is the recovery rate for the infected population. This approximation is valid on a time window where the infected population remains locally small compared to the total population size, see e.g. [Ball and Donnelly(1995), Barbour and Reinert(2013), Montagnon(2019)] for the classical mixed SIR model.

From a mathematical point of view, the first challenge is represented by the unboundedness of the jump rates: on the one hand, a site in $V$ can have a huge number of close-by neighbours, so that the jump rate of a single individual can be arbitrarily large at that site; on the other hand, there is no restriction on the number of individuals that can occupy a given site. Proving that such a process is well-posed is in itself not trivial: both classic and more recent techniques for proving existence fail to apply to our framework. The second challenge is represented by the irregularity of the support $V$ combined with the lack of reversibility of the system, due to births and deaths. In order to study the limiting behaviour of the process, we need to gather approaches coming from statistical mechanics and mathematical biology. To be more precise, the way we cope with the random geometry of the underlying graph is through stochastic homogenization, and in particular the results of [Faggionato(2022a)]. The theory of homogenization, first developed in a deterministic context by analysts, describes how the microscopic irregularities of a medium affect the macroscopic behaviour of the system. It is by now well understood how to use this technique to derive hydrodynamic limits for reversible particle systems, see e.g. [Gonçalves and Jara(2008), Faggionato(2022b)]. Yet, to our knowledge, one fundamental requirement for obtaining these results has been the reversibility of the process. In our context we need to adapt some tools to non-reversible population models, in the vein of e.g. [Kurtz(1981), Bansaye and Méléard(2015)]. In particular, a fundamental ingredient is the extension of an inequality for the supremum of a particle process due to Kipnis and Varadhan. This estimate is required for the proof of tightness, for the identification of the limit and to show that this limit has a density with respect to the Lebesgue measure.

1.1. Model and main results. For some probability space $(\Omega, P, \mathcal{F})$ and $\omega \in \Omega$, let $V = V(\omega)$ be the points of a Poisson point process on $\mathbb{R}^n$, with $n \ge 2$ and intensity $\gamma > 0$ under $P$. Let $E = E(\omega) = \{\{x, y\},\ x, y \in V\}$ be the set of unoriented edges between the points of $V$. We will consider at first the complete graph $G = (V, E)$ as the support for our particle system, while in Section 6 we will discuss how to extend our results by generalizing $G$ via bond percolation.
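Although the analysis below is carried out directly on the infinite graph, the environment just described is easy to sample on a finite window, which can help to visualise $G(\omega)$ and the rates. The following sketch is purely illustrative and not part of the paper: the box, the intensity and all names are arbitrary choices. It draws a Poisson cloud in a box and builds the jump rates $r(x,y) = e^{-\|x-y\|}$ together with the total rates $r(x)$.

```python
import numpy as np

rng = np.random.default_rng(0)

def sample_environment(gamma=1.0, L=10.0, n=2):
    """Sample a Poisson point process of intensity gamma in the box [0, L]^n and
    return the points V together with the jump rates r(x, y) = exp(-|x - y|)."""
    n_points = rng.poisson(gamma * L**n)           # Poisson number of points in the box
    V = rng.uniform(0.0, L, size=(n_points, n))    # their (uniform) positions
    dist = np.linalg.norm(V[:, None, :] - V[None, :, :], axis=-1)
    r = np.exp(-dist)                              # r(x, y) = e^{-||x - y||}
    np.fill_diagonal(r, 0.0)                       # convention r(x, x) = 0
    return V, r

V, r = sample_environment()
r_tot = r.sum(axis=1)                              # total jump rate r(x) at each site
print(len(V), float(r_tot.mean()), float(r_tot.max()))
```

Even on such a small window one can already observe the first difficulty mentioned in the introduction: $r(x)$ is finite at every site but admits no uniform upper bound over the cloud, becoming large wherever the points happen to cluster.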
Given $\omega$ and a configuration of particles $\eta \in \mathbb{N}^V$, consider the transitions
\[
\eta \longrightarrow
\begin{cases}
\eta^{x,y} & \text{with rate } \eta(x)\, r(x, y)\\
\eta^{x,+} & \text{with rate } b\, \eta(x)\\
\eta^{x,-} & \text{with rate } d\, \eta(x)
\end{cases}
\tag{1}
\]
where $\eta^{x,y} = \eta - \mathbb{1}_x + \mathbb{1}_y$ is the configuration obtained from $\eta$ by subtracting one particle in $x \in V$ and adding one in $y \in V$, $\eta^{x,+} = \eta + \mathbb{1}_x$ adds one particle in $x \in V$ and $\eta^{x,-} = \eta - \mathbb{1}_x$ has one particle fewer in $x \in V$. The positive numbers
\[
r(x, y) = r(y, x) = e^{-\|x-y\|}
\]
are the jump rates for each particle to go from point $x \in V$ to point $y \in V$, and vice versa. For simplicity we set $r(x, x) = 0$ for all $x \in V$. We let
\[
r(x) := \sum_{y \in V :\, \{x,y\} \in E} r(x, y)
\]
be the total jump rate of a particle at site $x \in V$. It is not hard to show that, $P$-almost surely, $r(x)$ is finite for every $x \in V$. The parameters $b, d \ge 0$ are the individual rates of birth and death of the particles, respectively.

For a given realization $G(\omega)$ of the graph, we introduce a probability space with measure $P^\omega$ under which we will construct our particle process. $E^\omega$ indicates the associated expectation. Let $\eta_0$ be the initial configuration of particles. Our first result establishes that, for $P$-almost every $\omega$, there exists a Markov process with jump rates given by (1) as soon as $\eta_0$ has uniformly bounded expectation on each site.

Theorem 1.1. For $P$-a.a. $\omega \in \Omega$ the following holds. Let $\eta_0$ be a random variable on $\mathbb{N}^V$ such that, for some $M \in \mathbb{N}$, one has $E^\omega[\eta_0(x)] \le M$ for all $x \in V$. Then, for all $T > 0$, there exists a Markov process $(\eta_t)_{t\in[0,T]}$ with initial value $\eta_0$ and paths in the Skorokhod space $D([0,T], \mathbb{N}^V)$ that satisfies the following: for functions $f_G : \mathbb{N}^V \to \mathbb{R}$ of the form $f_G(\eta) = \sum_{x \in V} G(x)\,\eta(x)$ with $G$ compactly supported on $\mathbb{R}^n$, the generator $\mathcal{L}$ of $(\eta_t)_{t\in[0,T]}$ is given by
\[
\mathcal{L} f_G(\eta) = \sum_{x, y \in V} \eta(x)\, r(x, y)\,\big(G(y) - G(x)\big) + \sum_{x \in V} \eta(x)\,(b - d)\, G(x) .
\tag{2}
\]

Our second result establishes the hydrodynamic limit of the process. Let $\mathcal{M}(\mathbb{R}^n)$ be the Polish space of non-negative Radon measures on $\mathbb{R}^n$ endowed with the vague topology. For $\pi \in \mathcal{M}(\mathbb{R}^n)$ and a continuous function $G \in C(\mathbb{R}^n)$ we write $\langle \pi, G\rangle = \int_{\mathbb{R}^n} G(y)\, \pi(\mathrm{d}y)$. We consider a scaling parameter $N \in \mathbb{N}$ and associate to each element $\eta \in \mathbb{N}^V$ the empirical measure $\pi^N = \pi^N(\eta) = N^{-n} \sum_{x \in V} \eta(x)\, \delta_{x/N} \in \mathcal{M}(\mathbb{R}^n)$, where $\delta_y$ represents a Dirac mass at $y \in \mathbb{R}^n$. Conversely, we can recover $\eta$ from $\pi^N$ via $\eta(\cdot) = \eta(\pi^N)(\cdot) = N^n \pi^N(\cdot/N)$, so that for any fixed $N$ we may use $\pi^N$ and $\eta$ indifferently. In this work we are interested in the regime where the motion is faster than births and deaths (resp. than infections and recoveries for epidemics). Thus, for a given $N$, we now introduce the process $\eta^N$ with sped-up motion. For $G$ compactly supported, its generator is given by (recall that $f_G(\eta) = \sum_{x \in V} G(x)\,\eta(x)$)
\[
\mathcal{L}_N f_G(\eta) = \sum_{x \in V} \eta(x)\, L_N G(x/N) + \sum_{x \in V} \eta(x)\,(b - d)\, G(x/N) .
\tag{3}
\]
Here
\[
L_N G(x/N) = \sum_{y \in V} N^2 r(x, y)\,\big(G(y/N) - G(x/N)\big)
\tag{4}
\]
is the generator of the random walk on $V/N := \{x/N : x \in V(\omega)\}$ with transition rates $N^2 r(\cdot, \cdot)$. The associated measure-valued process is defined as
\[
\pi^N_t := \frac{1}{N^n} \sum_{x \in V} \eta^N_t(x)\, \delta_{x/N} .
\tag{5}
\]
Theorem 1.1 guarantees that, for all $T > 0$ and fixed $N \in \mathbb{N}$, $(\pi^N_t)_{t\in[0,T]}$ is a well-defined Markov process with values in $D([0,T], \mathcal{M}(\mathbb{R}^n))$, the space of measure-valued càdlàg processes.
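For illustration only, the dynamics (1) with the diffusive speed-up of (3)-(4) can be simulated on a finite Poisson cloud by a standard Gillespie scheme. The sketch below is a simplification (finite box, no care for boundary effects, arbitrary parameter values) and is not the construction used in the paper, which has to handle infinitely many sites and unbounded rates; it also evaluates the pairing $\langle \pi^N_t, G\rangle$ of (5) against a test function.

```python
import numpy as np

rng = np.random.default_rng(1)

# A small Poisson cloud in [0, 10]^2 with rates r(x, y) = exp(-|x - y|) (illustrative values).
V = rng.uniform(0.0, 10.0, size=(rng.poisson(100), 2))
dist = np.linalg.norm(V[:, None, :] - V[None, :, :], axis=-1)
r = np.exp(-dist)
np.fill_diagonal(r, 0.0)

def simulate(r, eta0, b=1.0, d=0.5, N=5, T=0.2):
    """Gillespie-type simulation: a particle at site x jumps to y at rate N^2 r(x, y),
    branches at rate b and dies at rate d, as in the transitions (1) sped up as in (3)."""
    eta = np.array(eta0, dtype=float)
    r_tot = r.sum(axis=1)                              # r(x) = sum_y r(x, y)
    t = 0.0
    while True:
        site_rates = eta * (N**2 * r_tot + b + d)      # total event rate carried by each site
        R = site_rates.sum()
        if R == 0.0:
            break                                      # extinction
        t += rng.exponential(1.0 / R)
        if t > T:
            break
        x = rng.choice(len(eta), p=site_rates / R)     # site of the next event
        u = rng.uniform(0.0, N**2 * r_tot[x] + b + d)
        if u < N**2 * r_tot[x]:                        # one particle jumps from x to some y
            y = rng.choice(len(eta), p=r[x] / r_tot[x])
            eta[x] -= 1.0
            eta[y] += 1.0
        elif u < N**2 * r_tot[x] + b:                  # birth (infection)
            eta[x] += 1.0
        else:                                          # death (recovery)
            eta[x] -= 1.0
    return eta

def pair_with_test_function(eta, V, G, N, n=2):
    """<pi^N_t, G> = N^{-n} sum_x eta_t(x) G(x / N), cf. (5)."""
    return float((eta * G(V / N)).sum()) / N**n

eta_T = simulate(r, eta0=np.ones(len(V)), N=5, T=0.2)
G = lambda u: np.exp(-np.sum(u**2, axis=1))            # smooth, rapidly decaying test function
print(pair_with_test_function(eta_T, V, G, N=5))
```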
For the scaling limit, we need to consider initial conditions such that the tails of $\eta_0$ are dominated by a product of (translated) Poisson distributions indexed by $V$, as precisely defined here below. This allows us in particular to invoke the existence and characterization stated in Theorem 1.1. For example, one can take configurations with a number of particles that is constant on each site, or that is distributed as i.i.d. Poisson random variables, or a sum of the two. We also need the initial conditions to converge as $N$ goes to infinity. For a given realization of the graph $\omega \in \Omega$, we thus make the following assumption.

Domination & Convergence Assumption. The sequence of random configurations $(\eta^N_0)_{N\in\mathbb{N}}$ satisfies the following:
(i) There exist $M \in \mathbb{N}_0$ and $\rho > 0$ such that for any $N \in \mathbb{N}$, $x \in V$ and for any $A \subset V$ and $(n_x)_{x\in A} \in \mathbb{N}^{|A|}$,
\[
P^\omega\big(\forall x \in A,\ \eta^N_0(x) \ge M + n_x\big) \le \prod_{x \in A} \Big( \sum_{j=n_x}^{\infty} \frac{\rho^j e^{-\rho}}{j!} \Big) .
\tag{6}
\]
(ii) There exists a bounded Borel function $\rho_0 : \mathbb{R}^n \to [0,\infty)$ such that, for any $C^\infty$ function with compact support $G \in C^\infty_c(\mathbb{R}^n)$,
\[
\lim_{N\to\infty} N^{-n} \sum_{x \in V} \eta^N_0(x)\, G(x/N) = \int_{\mathbb{R}^n} G(x)\, \rho_0(x)\, \mathrm{d}x
\tag{7}
\]
in $P^\omega$-probability.

Consider now $\mathcal{B}(\Omega)$, the family of bounded Borel functions on $\Omega$, and let $\sigma^2 \ge 0$ be characterized by the variational formula
\[
\sigma^2 := \frac{1}{2} \inf_{\psi \in \mathcal{B}(\Omega)} \mathbb{E}_0\Big[ \sum_{y \in V} r(0, y)\,\big(y_1 + \psi(\theta_y\omega) - \psi(\omega)\big)^2 \Big] .
\tag{8}
\]
Here $y_1$ denotes the first coordinate of $y \in \mathbb{R}^n$ and $\theta_y\omega$ is the environment translated by $y$ (see Section 3.1 for the precise meaning of this). The expectation $\mathbb{E}_0$ is taken with respect to the Palm measure relative to the underlying Poisson point process, which can be obtained by just adding to the configuration a point at the origin (see [Daley and Vere-Jones(2008)] for a complete account of Palm measures). Calling $I_n$ the $n$-dimensional identity matrix, we point out that $2\sigma^2 I_n$ is the diffusion matrix of the Brownian motion obtained by rescaling diffusively the random walk on the Poisson point process with transition rates $r(x, y)$, see e.g. [Faggionato(2022a)].

Theorem 1.2. For $P$-a.a. $\omega \in \Omega$ the following holds. Let $(\eta^N_0)_{N\in\mathbb{N}}$ be a sequence of random variables on $\mathbb{N}^V$ which satisfies the Domination & Convergence Assumption for some bounded Borel function $\rho_0 : \mathbb{R}^n \to [0,\infty)$. Then the sequence of processes $\{(\pi^N_t)_{t\in[0,T]}\}_{N\in\mathbb{N}}$ with initial value $\pi^N_0 = \pi^N(\eta^N_0)$ converges in law in $D([0,T], \mathcal{M}(\mathbb{R}^n))$ to the deterministic trajectory $(\rho(t, u)\,\mathrm{d}u)_{t\in[0,T]}$, where $\rho(\cdot,\cdot) : [0,T] \times \mathbb{R}^n \to \mathbb{R}$ is the unique weak solution of the problem
\[
\begin{cases}
\partial_t \rho = \sigma^2 \Delta\rho + (b - d)\rho\\
\rho(0, \cdot) = \rho_0 .
\end{cases}
\tag{9}
\]
Since the sequence of processes converges in distribution to a deterministic process, we obtain immediately the following convergence in probability.

Corollary 1.3. Under the hypothesis of Theorem 1.2 we have that, for all $t > 0$, $G \in C_c(\mathbb{R}^n)$ and $\varepsilon > 0$,
\[
\lim_{N\to\infty} P^\omega\Big( \Big| N^{-n} \sum_{x \in V} G(x/N)\,\eta^N_t(x) - \int_{\mathbb{R}^n} G(x)\,\rho(x, t)\, \mathrm{d}x \Big| \ge \varepsilon \Big) = 0 .
\]

1.2. State of the art, techniques and structure of the paper. The rest of the paper is substantially divided into two parts, corresponding to the proofs of the two main theorems.

Section 2: Theorem 1.1 establishes the well-posedness of the process. As mentioned before, our setting does not seem to be treated in the previous literature, even if we set the rates of birth and death equal to 0. The case $b = d = 0$ corresponds to an instance of the so-called zero-
The existence of the zero- +range process on an arbitrary countable state space was proved in the classical work +[Liggett(1973)] and then under weaker assumptions in [Andjel(1982)]. A first require- +ment for those constructions is that, in some sense, the rate of jump of each particle +must be uniformly bounded from above, a condition that fails in our setting due to +the irregularity of V . A second problem is that in [Liggett(1973)] and [Andjel(1982)] +one must impose a restriction on the initial configuration of particles η0. Namely, +one accepts only η0 satisfying � +x∈V η0(x)α(x) < ∞ for some function α such that +� +y∈V p(x, y)α(y) ≤ Mα(x), where M > 0 is a given constant and p(x, y) indicates +the probability to go from x to y when the particle jumps. In our case, again because +of the irregularity of V , this condition would not allow us to consider, for example, +initial conditions with a constant number of particles on each site. Neither more re- +cent approaches to prove existence for general particle systems on random graphs, like +[Ganguly and Ramanan(2022)], cover our model, because of the unboundedness of the +jump rates. +We adopt a different approach which borrows from [Andjel(1982)] the idea of ghost +particles. In Section 2.1 we enlarge our space and consider a richer measure-valued +process where, roughly put, particles are labelled and leave a “ghost” behind them +every time they jump to a new site. To show existence of the original process, we +pass through the well-posedness of the stochastic differential equation (12) associated +to this richer measure-valued process. In Section 2.2 we prove the existence when we +restrict the dynamics to a finite subgraph of V . We also pin down a key estimate of +how many particles have visited a given compact set up to time T in mean, making +use of the ghosts (Lemma 2.1). In Section 2.3 we extend the existence of the process +when considering the whole infinite graph, but under the condition of having a finite +number of particles at time 0. This is achieved by showing that the range covered by +the particles stays finite almost surely, see Proposition 2.2. Finally, in Section 2.4, we +include in our construction also the case of an infinite number of initial particles. +Section 3: In this section we prepare some of the technical tools that are necessary for +the proof of Theorem 1.2. The operator LN can be thought of as a discretization of +the operator σ2∆. For a given G ∈ C∞ +c (Rn), though, some difficulties arise if one tries +to prove directly the convergence of LNG, due to the possible lack of regularity of +this last object. To overcome the problem, one wants to substitute G by a regularized + +6 +V. BANSAYE AND M. SALVI +version Gλ +N for which LNGλ +N directly yields the expected limit σ2∆G. This proce- +dure, introduced in [Jara and Landim(2008)] in the context of hydrodynamic limits +and further developed in [Gonçalves and Jara(2008)], requires results from stochas- +tic homogenization theory. First of all, in Section 3.1, we prove that indeed we are +allowed to use the homogenization machinery elaborated in [Faggionato(2022a)]. In +Section 3.2 we introduce Gλ +N, prove some bounds in norm for this function and use +the homogenization results to show its convergence to G in L1 and in L2, see Lemma +3.3 and Lemma 3.4. 
Section 4: One of the main technical ingredients for proving the hydrodynamic limit of the sequence of processes $\{(\pi^N_t)_{t\in[0,T]}\}_{N\in\mathbb{N}}$ is the Kipnis–Varadhan estimate, which controls the supremum of the particle process integrated against a test function, see [Kipnis and Varadhan(1986)]. The estimate in its classic form, though, is valid only for reversible processes. In Lemma 4.1 we adapt the Kipnis–Varadhan estimate to our model without births and deaths, which is reversible but presents some issues due to the irregularity of $V$. In Lemma 4.3 we extend the estimate to the non-reversible setting. The idea is to look separately at each branch of the genealogical tree of the particles in the initial configuration. The process that follows the particles of a given branch can then be dominated by another (reversible) process, to which we can apply the original Kipnis–Varadhan type of estimate. This dominating process is obtained via a percolation procedure on the particles in the initial configuration.
Section 5: The strategy to prove Theorem 1.2 follows a classical tightness and identification procedure, which relies on the two previous sections. In Section 5.1 we consider the martingale problem and show that the process $M^N$ appearing in (74) is an $L^2$ martingale, via a truncation argument. We also prove that $M^N$ tends to 0 in $L^2$ as $N$ tends to infinity. This allows us to easily conclude the proof of tightness by Aldous' criterion. Finally, in Lemma 5.6 we prove that a limit point $(\pi_t)_{t\in[0,T]}$ of the sequence $\{(\pi^N_t)_{t\in[0,T]}\}_{N\in\mathbb{N}}$ must have a density with respect to the Lebesgue measure and that it has to satisfy a suitable differential equation that admits a unique weak solution, cf. (71). For simplicity of exposition, the proof up to this point is carried out for the case without deaths, $d = 0$, and we conclude the section by extending the result to the general case $d > 0$, see Section 5.4.
Section 6: In the very last part of the paper we show that our two main theorems continue to hold if we consider a percolation procedure on the edges of the complete graph with nodes $V$. As special cases of interest for applications, we analyze the long-range percolation and scale-free percolation random graphs. We conclude in Section 6.2 with a discussion of open problems.

1.3. Notation. For a given realization of the graph $G = G(\omega)$, with $\omega \in \Omega$, we recall that $P^\omega$ is the probability measure under which we have built the process defined in Theorem 1.1 and $E^\omega$ is the relative expectation. We will make clear each time which initial distribution of particles the process is starting from, but sometimes we will further stress the initial condition with a subscript. For example, if the initial distribution of particles on $G(\omega)$ is $\mu$, then we write $P^\omega_\mu$.

Remark 1.4. Through most of the proofs of the paper, we will talk directly about $P^\omega$, without specifying each time that $\omega \in \Omega$ is a realization of the underlying graph sampled according to the measure $P$. All the processes that appear will evolve under $P^\omega$. All the claims about these processes are to be understood as holding for $P$-almost all $\omega$, even when we do not mention it explicitly.

As mentioned before, $\mathcal{M} = \mathcal{M}(\mathbb{R}^n)$ stands for the Polish space of non-negative Radon measures on $\mathbb{R}^n$ endowed with the vague topology (namely, a sequence of measures $\nu_n$ converges to a measure $\nu$ in $\mathcal{M}$ if $\langle\nu_n, f\rangle \to \langle\nu, f\rangle$ for all $f \in C_c(\mathbb{R}^n)$). Consequently, $D([0,T], \mathcal{M}(\mathbb{R}^n))$ indicates the space of measure-valued càdlàg processes.
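As an aside, the quantities entering Theorem 1.2 can be probed numerically. Taking the trial function $\psi \equiv 0$ in the variational formula (8) gives the elementary upper bound $\sigma^2 \le \frac{1}{2}\,\mathbb{E}_0\big[\sum_{y\in V} r(0,y)\, y_1^2\big]$, which by the Slivnyak–Mecke formula equals $\frac{\gamma}{2}\int_{\mathbb{R}^n} e^{-\|y\|}\, y_1^2\,\mathrm{d}y$, that is $3\pi\gamma$ in dimension $n = 2$; the true $\sigma^2$ requires solving the corrector problem and can be strictly smaller. The following Monte Carlo sketch is illustrative and not part of the paper's argument; it checks this value in dimension 2, with $\gamma = 1$ and the box size as placeholder choices.

```python
import numpy as np

rng = np.random.default_rng(2)

def sigma2_upper_bound(gamma=1.0, L=40.0, samples=200):
    """Monte Carlo evaluation of (1/2) E_0[ sum_y r(0, y) y_1^2 ] in dimension n = 2,
    i.e. the variational formula (8) evaluated at the trial function psi = 0.
    By Slivnyak-Mecke the exact value is (gamma / 2) * int exp(-|y|) y_1^2 dy = 3 pi gamma,
    and it only upper-bounds sigma^2.  The box [-L/2, L/2]^2 truncates a negligible tail."""
    vals = []
    for _ in range(samples):
        n_pts = rng.poisson(gamma * L**2)
        y = rng.uniform(-L / 2, L / 2, size=(n_pts, 2))   # the Poisson cloud seen from the origin
        vals.append(np.sum(np.exp(-np.linalg.norm(y, axis=1)) * y[:, 0] ** 2))
    return 0.5 * float(np.mean(vals))

print(sigma2_upper_bound(), 3 * np.pi)   # the two numbers should agree up to Monte Carlo error
```

Once a value of $\sigma^2$ is available, the limit equation (9) has constant coefficients and is solved explicitly by $\rho(t, \cdot) = e^{(b-d)t}\, \Phi_{2\sigma^2 t} * \rho_0$, where $\Phi_s$ denotes the centred Gaussian kernel with covariance $s\, I_n$; this gives a concrete target against which simulated empirical measures $\pi^N_t$ can be compared.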
2. Existence and characterization of the process

We will prove the existence of the process just for $d = 0$. Indeed, a positive death rate of the particles cannot contribute to the explosion of the process in finite time (if anything, it can help prevent it). So, if the process is well-defined for $d = 0$, a completely analogous construction proves that it is also well-defined for any $d > 0$.

2.1. Measure-valued process. In order to prove Theorem 1.1 with $d = 0$ we will have to consider an auxiliary process that encodes more information than $(\eta_t)_{t\in[0,T]}$ and that lives in the space of measure-valued processes. Let $I := \mathbb{N} \times \bigcup_{k\ge 0}\{1,2\}^k$. Under the measure $P^\omega$, let $(N^{x,y}_i)_{i\in I,\, x,y\in V}$ be a collection of Poisson point measures on $\mathbb{R}_+$ with intensity $r(x, y)\,\mathrm{d}t$, and recall that $r(x, x) = 0$ for each $x \in V$. These Poisson point measures are chosen independent for each ordered couple $(x, y)$ and they are also independent of the initial state $\eta_0$. Also let $(N^b_i)_{i\in I}$ be a collection of independent Poisson point measures on $\mathbb{R}_+$ with intensity $b\,\mathrm{d}t$, independent of the $N^{x,y}_i$'s and of $\eta_0$. The interpretation is the following: $I$ shall be thought of as the space of labels attached to each single particle. Particles that are present at time 0 are simply labelled with the natural numbers. If particle $i$ is present at time $t \ge 0$, we call $X^i_t \in V$ its position. Suppose a particle with label $i = n\, i_1 i_2 \dots i_k$, with $n \in \mathbb{N}$, $k \in \mathbb{N}_0$ and $i_j \in \{1, 2\}$ for $j = 1, \dots, k$, is at position $X^i_{t-}$ at time $t-$ and suppose that $N^b_i(t) - N^b_i(t-) = 1$. Then particle $i$ disappears at time $t$ and is replaced by two particles with labels $n\, i_1 i_2 \dots i_k 1$ and $n\, i_1 i_2 \dots i_k 2$ on the same site. If instead particle $i$ is at $X^i_{t-} = x$ at time $t-$ and $N^{x,y}_i(t) - N^{x,y}_i(t-) = 1$, then particle $i$ disappears and generates particle $i1$ at site $y$, that is, $X^{i1}_t = y$, and it leaves behind a ghost particle on site $x$ labelled with $i$. This way of labelling the particles is commonly known as the Ulam–Harris–Neveu notation.

Let
\[
\widetilde\pi_t = \sum_{i \in A_t} \delta_{(i, X^i_t, a)} + \sum_{i \in G_t} \delta_{(i, X^i_t, g)}
\]
be the measure on $I \times V \times \{a, g\}$ keeping track of the position and state of each particle. For any $i \in I$ and $u \in \{a, g\}$, one has $\widetilde\pi_t(\{i\} \times V \times \{u\}) \in \{0, 1\}$. More precisely,
\[
A_t := \{i \in I : \widetilde\pi_t(\{i\} \times V \times \{a\}) > 0\}
\tag{10}
\]
is the set of particles that are present at time $t$ and that can jump or give birth, also called alive particles, while
\[
G_t := \{i \in I : \widetilde\pi_t(\{i\} \times V \times \{g\}) > 0\}
\tag{11}
\]
is the set of ghost particles present at time $t$.

Our aim is to construct the process $(\widetilde\pi_t)_{t\ge 0}$ which satisfies, $P^\omega$-a.s. and on every compact set,
\[
\widetilde\pi_t = \widetilde\pi_0
+ \int_0^t \sum_{i \in A_{s-},\, y \in V} \big( \delta_{(i1, y, a)} + \delta_{(i, X^i_{s-}, g)} - \delta_{(i, X^i_{s-}, a)} \big)\, N^{X^i_{s-}, y}_i(\mathrm{d}s)
+ \int_0^t \sum_{i \in A_{s-}} \big( \delta_{(i1, X^i_{s-}, a)} + \delta_{(i2, X^i_{s-}, a)} - \delta_{(i, X^i_{s-}, a)} \big)\, N^b_i(\mathrm{d}s)
\tag{12}
\]
where, for $i \in A_t$, $X^i_t$ is the unique element $x \in V$ such that $\widetilde\pi_t(\{i\} \times \{x\} \times \{a\}) = 1$. The initial configuration $\widetilde\pi_0$ might be random under $P^\omega$.

For a Borel set $A \subseteq \mathbb{R}^n$, we also let
\[
\pi_t(A) := \widetilde\pi_t\big(I \times (A \cap V) \times \{a\}\big)
\tag{13}
\]
be the total number of alive particles in $A$ at time $t$. We will see that $(\pi_t)_{t\in[0,T]}$ corresponds to the measure-valued process $(\pi^N_t)_{t\in[0,T]}$ introduced in (5) with $N = 1$.

We will show the existence of the process $(\eta_t)_{t\in[0,T]}$ by constructing the richer process $(\widetilde\pi_t)_{t\in[0,T]}$ in three steps.
First, in Section 2.2, we will show the existence of an analogous process restricted to a finite graph. We will then build on this to extend the existence of the process to the infinite graph, but only when the initial configuration has a finite number of particles, see Section 2.3. Finally, in Section 2.4, we will conclude with the existence of $(\widetilde\pi_t)_{t\in[0,T]}$ under the conditions of Theorem 1.1.

2.2. Existence of the process on a finite graph. In this section we deal with a version of the process $(\widetilde\pi_t)_{t\in[0,T]}$ for which the underlying spatial point process is restricted to a finite number of points. Fix a bounded set $B \subset \mathbb{R}^n$. The process $(\widetilde\pi^B_t)_{t\in[0,T]}$ is defined as the strong solution of a stochastic differential equation whose jumps are represented by the Poisson point measures introduced in Section 2.1. Analogously to (10) and (11), let $A^B_t := \{i \in I : \widetilde\pi^B_t(\{i\} \times (V \cap B) \times \{a\}) > 0\}$ and $G^B_t := \{i \in I : \widetilde\pi^B_t(\{i\} \times (V \cap B) \times \{g\}) > 0\}$. For $i \in A^B_t$, we write $X^i_t$ for the location of particle $i$ at time $t$, that is, the unique point $x \in V$ such that $\widetilde\pi^B_t(\{i, x, a\}) = 1$. Then $(\widetilde\pi^B_t)_{t\in[0,T]}$ is defined via
\[
\widetilde\pi^B_t = \widetilde\pi^B_0
+ \int_0^t \sum_{i \in A^B_{s-},\, y \in V\cap B} \big( \delta_{(i1, y, a)} + \delta_{(i, X^i_{s-}, g)} - \delta_{(i, X^i_{s-}, a)} \big)\, N^{X^i_{s-}, y}_i(\mathrm{d}s)
+ \int_0^t \sum_{i \in A^B_{s-}} \big( \delta_{(i1, X^i_{s-}, a)} + \delta_{(i2, X^i_{s-}, a)} - \delta_{(i, X^i_{s-}, a)} \big)\, N^b_i(\mathrm{d}s) ,
\tag{14}
\]
where the initial configuration $\widetilde\pi^B_0$ is a point measure on $I \times (V \cap B) \times \{a, g\}$, possibly random under $P^\omega$. Notice that we do not impose any restriction on the initial configuration at this stage.

Let us justify that there exists a unique solution to this stochastic differential equation. Since we deal with a countable discrete state space, the existence of such a solution can be shown just by constructing a stochastic process with a classical inductive scheme, where the successive jumps are given by the Poisson point measures. This is a strong Markov process which is well defined up to the potential accumulation point of the jumps (if explosion occurs) and, by construction, it is the solution of (14) until the time of explosion. We just need to prove that explosion does not occur almost surely. For that purpose, let us introduce the projection of the process on the last two coordinates,
\[
Z^B_t(K, u) := \widetilde\pi^B_t(I, V \cap K, u), \qquad K \subseteq \mathbb{R}^n,\ u \subseteq \{a, g\} .
\]
$Z^B_t(K, u)$ counts the alive or ghost particles in $K$ at time $t$. Under $P^\omega$, $(Z^B_t)_{t\in[0,T]}$ is a multi-type branching process with a finite number of types (namely, $(V \cap B) \times \{a, g\}$) and bounded reproduction mean: at rate $r(x, y)$ each particle of type $(x, a)$ is replaced by two particles of types $(x, g)$ and $(y, a)$; at rate $b$ each particle of type $(x, a)$ creates a new particle of type $(x, a)$; the particles of type $(\cdot, g)$ do not evolve. Using classical first moment estimates for the branching process $(Z^B_t)_{t\in[0,T]}$ we obtain non-explosivity of the process, so $(\widetilde\pi^B_t)_{t\in[0,T]}$ is well defined for any positive time $T > 0$, see e.g. [Bansaye and Méléard(2015)]. Actually, we will need more quantitative estimates on the first moment of $Z^B_t$ for the limiting procedure in the next section, in particular its dependence on the transition rates. These estimates are given in the next lemma, using the harmonic function of the branching process, which is here constant in space.

Lemma 2.1. Consider two compact sets $K, B \subset \mathbb{R}^n$ with $K \subseteq B$.
Take a (possibly random) +initial configuration ZB +0 such that ZB +0 (B, {g}) = 0 and, for some M > 0, Eω[ZB +0 (x, {a})] ≤ M +for all x ∈ B ∩ V . Then it holds, for all T > 0, +Eω[ZB +T (K, {a, g})] ≤ CKMebT , +(15) +where CK = � +x∈K∩V (b−1r(x) + 1). In particular CK does not depend on B. +Proof. We define the matrix MB +t +with entries indexed by the types +MB +t +� +(x, u), (y, v) +� += Eω +δ(x,{u}) +� +ZB +t (y, {v}) +� +x, y ∈ B ∩ V, u, v ∈ {a, g} , +so that MB +t +� +(x, u), (y, v) +� +indicates the mean number of particles of type (y, v) present at +time t if we started with a unique particle of type (x, u) at time 0. This matrix is the first +moment semigroup associated to a branching process and thus coincides with exp(tA), where +A = A(B) is a finite matrix given by +A = +� +A1 +A2 +0 +0 +� +with the following blocks: A1 is the submatrix accounting for the evolution of a–particles, +that is, for x ̸= y ∈ B∩V , we have A1(x, x) = b − rB(x) with rB(x) := � +z̸=x,z∈B∩V r(x, z) +and A1(x, y) = r(x, y). A2 accounts for the generation of g–particles from a–particles, that +is, A2(x, y) = r(x, y). The two lower blocks have all the entries equal to 0, since g–particles +neither move nor generate other g–particles. To see that, one may use Kolmogorov forward +equation or apply the differential equation (14) to (I, y, v) with �πB +0 += δ{1,x,{u}} and take +expectation on both sides to get MB +t +� +(x, u), (y, v) +� += δ(x,u)=(y,v) + +� t +0 MsA +� +(x, u), (y, v) +� +ds. +We compute now exp(At): +Ak = +� +Ak +1 +Ak−1 +1 +A2 +0 +0 +� +=⇒ +eAt = +� +eA1t +� +k≥1 +Ak−1 +1 +A2 +k! +tk +0 +Id +� +where Id is the identity matrix. Call z0 = (M, M, . . . , M ; 0, 0, . . . , 0) the configuration having +M alive particles on each site of B∩V and no ghost particles. Then, for any initial configuration + +10 +V. BANSAYE AND M. SALVI +ZB +0 with less than M alive particles per site in B ∩ V in average and no ghosts as in the +hypothesis of the lemma, it holds +Eω� +ZB +T (K, {a, g}) +� += Eω� +ZB +0 eAT (K, {a, g}) +� += Eω[ZB +0 ]eAT (K, {a, g}) ≤ z0eAT (K, {a, g}) . +Since ¯1 = (1, 1, . . . , 1) is an eigenvector for the matrix A1 with eigenvalue b (that is, ¯1A1 = +(b, b, . . . , b)) and since +¯1 +� +k≥1 +Ak−1 +1 +A2 +k! +tk = +� +k≥1 +bk−1(1, . . . , 1)A2 +k! +tk = +� +k≥1 +bk−1 +k! tk(rB(x))x∈B = b−1(ebt − 1)(rB(x))x∈B , +it holds +z0eAt = M(ebt, . . . , ebt ; b−1(ebt − 1)r(·), . . . , b−1(ebt − 1)r(·)) +and we can conclude that +Eω[ZB +T (K, {a, g})] ≤ MebT #{K ∩ V } + Mb−1ebT +� +x∈K∩V +r(x) , +which implies (15). +□ +2.3. Existence of the process on the infinite graph with a finite number of initial +particles. In this section we want to show that the process (�πt)t≥0 described in (12) is well +defined when we start with a configuration with a finite number of particles. We will show +that (�πt)t≥0 can be in fact obtained as the limit of the process (�πBN +t +)t≥0 introduced in Section +2.2, where BN is the n-dimensional box [−N, N]n. +Consider the process (�πBN +t +)t∈[0,T] introduced in Section 2.2 up to time T > 0. We want +to show now that this process “stabilizes” as N tends to infinity. That is, suppose to use the +same source of randomness (i.e. the same realization of the Poisson processes N x,y +i +, N b +i ) to +construct the process (�πBN +t +)t∈[0,T] for all different N’s. Then, with P ω–probability 1, there +exists N0 ∈ N such that, for all N ≥ N0, +(�πBN +t +)t∈[0,T] ≡ (�π +BN0 +t +)t∈[0,T] . 
+(16) +To this end, we first of all prove that the progeny of a finite number of particles remains into +a finite region up to time T > 0 with probability 1 as N → ∞. +For an initial configuration of alive and ghost particles z0, define the maximal displacement +at time T as +RN(z0, T) := sup +t∈[0,T] +sup +i∈At +∥XN,i +t +∥ , +where XN,i +t +is the position of particle i at time t in the process (�πBN +t +)t∈[0,T]. +Proposition 2.2. Consider an initial configuration z0 with a finite number of alive particle. +Then, P ω–almost surely, there exists Q > 0 such that RN(z0, T) ≤ Q for all N ∈ N. +Proof. We first consider z0 to be constituted of a unique alive particle, labelled with 1. Without +loss of generality we can imagine particle 1 to start at the origin. Abbreviate R = RN(z0, T) +and consider (�πBN +t +)t∈[0,T] for any N. For M > 0, we can bound +P ω(R > M) = +∞ +� +ℓ=0 +P ω(R > M | E2ℓ+1)P ω(E2ℓ+1) +(17) + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS11 +with +Eℓ = Eℓ(T) := {particle 1 had ℓ − 1 descendants up to time T} . +By descendant of particle 1 we mean a particle with label starting by 1 and that was generated +via a birth event (so we do not count the particles whose label start by 1 that were generated +with a change of label due to a jump event). We are considering only odd integers 2ℓ + 1 +since each time a particle disappears it generates two new particles. For ℓ ∈ N, the quantity +P ω(Eℓ) is clearly dominated by P ω(E+ +ℓ ), with +E+ +ℓ := {particle 1 had at least ℓ − 1 descendants up to time T} . +The number ZBN +t +(Rn, {a}) of alive particles at time t follows a N-valued Markov process +starting in 1 and that goes from k to k+1 with rate kb. Let (ek)k∈N be independent exponential +random variables under P ω with Eω[ek] = (bk)−1 and let Sℓ := �ℓ +k=1 ek. We bound, for all +θ > 0, +P ω� +Sℓ ≤ T +� +≤ eθT Eω[e−θSℓ] = eθT +ℓ +� +k=1 +� +1 − +θ +kb + θ +� +≤ eθT−�ℓ +k=1 +θ +kb+θ , +(18) +where for the first inequality we have exploited the exponential Markov inequality, while for +the second passage we have used the independence of the ek’s and the formula for the moment +generating function of the exponential distribution. +Notice that if the total number of descendants of particle 1 at time T is 2ℓ, then ZBN +t +(Rn, {a}) = +ℓ + 1. A (non-optimized) choice of θ = 4b in (18) yields therefore +P ω(E+ +2ℓ+1) = P ω(ZBN +t +(Rn, {a}) ≥ n + 1) = P ω� +Sℓ+1 ≤ T +� +≤ e4bT−�ℓ+1 +k=1 +4 +k+4 ≤ Cℓ−4 +(19) +for ℓ sufficiently large and some universal constant C > 0. +We move to the analysis of the term P(R > M | Eℓ) in (17). Abandoning for a moment the +Ulam–Harris–Neveu notation, let us look at the descendants of particle 1 and just label them +2, 3, 4, ... in chronological order of birth (particles 2j and 2j + 1 are born in the same instant, +for all j). Let xk ∈ V and tk ∈ [0, T] be the site and the time where the k-th particle was born +and let (Xk +t ) be its trajectory while alive. Let Rk be the maximal displacement of particle k, +that is, Rk := supt∈[tk,T] ∥Xk +t − xk∥. We observe that +P ω(R > M | Eℓ) = P ω� +∃k ∈ {1, . . . , ℓ} : Rk > M/ℓ and Rj < M/ℓ for all j < k | Eℓ +� +≤ +ℓ +� +k=1 +P ω� +Rk > M/ℓ +�� Ak,ℓ +� +, +(20) +with Ak,ℓ := {Rj < M/ℓ for all j < k} ∩ Eℓ. +We have therefore to study the probability +that (Xk +t ) left the ball BM/ℓ(xk) before time T knowing that the first k − 1 particles had a +displacement smaller than M/ℓ. 
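As an aside, before carrying out the displacement estimate, the Chernoff-type bound obtained in (18)–(19) for the times S_ℓ is easy to probe numerically. The following sketch assumes numpy and uses arbitrary values of b, T and ℓ; it is only meant to illustrate the order of magnitude of the bound.

```python
import numpy as np

rng = np.random.default_rng(1)

# Bound (18): P(S_l <= T) <= exp(theta*T) * prod_{k=1}^{l} (1 - theta/(k*b + theta)),
# where S_l is a sum of independent Exp(k*b) variables, k = 1, ..., l.
b, T, ell, n_samples = 1.0, 1.0, 20, 200_000
theta = 4.0 * b                                   # the (non-optimized) choice made above
k = np.arange(1, ell + 1)
S = rng.exponential(1.0 / (b * k), size=(n_samples, ell)).sum(axis=1)
empirical = (S <= T).mean()
bound = np.exp(theta * T) * np.prod(1.0 - theta / (k * b + theta))
print(f"empirical P(S_{ell} <= T) = {empirical:.2e}   Chernoff bound = {bound:.2e}")
# for theta = 4b the bound decays like ell**(-4), which is how it is used in (19)
```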
We point out that, under Ak,ℓ, we have that BM/ℓ(xk) is +completely contained in BM: it follows that all x ∈ BM/ℓ(xk) have r(x) < C log M by Lemma +2.3 (see here below) and that BM/ℓ(xk) contains at most CMn log M/ℓn points of V (this +follows from item (ii) in the proof of Lemma 2.3). +Let +τk := inf +� +t ∈ [tk, T] : Xk +t ̸∈ BM/ℓ(xk) +� +. + +12 +V. BANSAYE AND M. SALVI +We decompose the event {τk ≤ T} = {τ ′ +k < τk ≤ T} ∪ {τk ≤ T, τ ′ +k} with +τ ′ +k := inf +� +t ∈ [tk, T] : ∥Xk +t − Xk +t−∥ ≥ +� +M/ℓ +� +the first time that particle k makes a jump longer than +� +M/ℓ. Under Ak,ℓ, the event that +(Xk +t ) makes a jump of length larger than +� +M/ℓ inside BM/ℓ(xk) has rate smaller than e−√ +M/ℓ +times the number of the points in BM/ℓ(xk). It follows that +P ω(τ ′ +k < τk ≤ T | Ak,ℓ) ≤ P ω(ξ ≤ T) ≤ cTe−√ +M/ℓMn log M/ℓn +(21) +for some universal constant c > 0, where ξ is an exponential random variable with parameter +Ce−√ +M/ℓMn log M/ℓn. +On the other hand, the event {τk ≤ T, τ ′ +k} implies that Xk has +performed more than +� +M/ℓ jumps before time T. Remember that under Ak,ℓ each jump +has rate smaller than C log M by Lemma 2.3. Hence, if Y is a Poisson random variable of +parameter CT log M, +P ω(τk ≤ T, τ ′ +k | Ak,ℓ) ≤ P ω(Y > +� +M/ℓ) ≤ e−CT log M� +(M/ℓ)−1/2eCT log M +�√ +M/ℓ +(22) +where the last bound holds for M/ℓ sufficiently large, for example when ℓ ≤ +√ +M, see +e.g. [Vershynin(2018), Exercise 2.3.3]. Continuing from (20), bounds (21) and (22) together +yield, for M sufficiently large and ℓ ≤ +√ +M, +P ω(R > M | Eℓ) ≤ +ℓ +� +k=1 +P ω(τk ≤ T +�� Ak,ℓ) ≤ c1ℓe−c2√ +M/ℓ . +(23) +Going back to (17) and using (54) and (23), we finally have +P ω(R > M) ≤ +√ +M +� +ℓ=0 +P ω(R > M | E2ℓ+1) + +∞ +� +ℓ= +√ +M +P ω(E+ +2ℓ+1) +≤ c1Me−c2M1/4 + CM−3/2. +This quantity is summable in M, which implies the claim by the Borel-Cantelli lemma for a +single initial particle. The argument can be easily generalized to any finite number of initial +particles. +□ +Lemma 2.3. There exists C > 0 such that, for P-a.a. ω, the following holds: there exists +¯N = ¯N(ω) such that ∀N ≥ ¯N one has +max +x∈BN∩V r(x) < C log N . +Proof. We will use the two following trivial facts about Poisson point processes. Recall that +BN = [−N, N]n. Let (Bi)i=1,...,(2N)n be a collection of disjoint (up to their border) volume-1 +cubes covering BN and let Cj := {x ∈ Bj+1 \ Bj}, for j ∈ N, be the j-th square-crown around +the origin. Then there exist constants c1, c2 > 0 only depending on the dimension n such that, +for P-a.a. ω, +(i) there exists N1 = N1(ω) such that for all N ≥ N1 +#{x ∈ CN ∩ V } ≤ c1N n−1 ; +(ii) there exists ¯N2 = ¯N2(ω) such that for all N ≥ ¯N2 and for all i = 1, . . . , (2N)n +#{x ∈ Bi ∩ V } ≤ c2 log N . + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS13 +Both facts can be checked by using classic concentration inequalities for Poisson random +variables around their mean and then the Borel-Cantelli lemma. +Take N ≥ max{N1, N2} and write, for x ∈ BN, +r(x) = +� +y∈B2N ∩V +e−∥x−y∥ + +� +y∈Bc +2N∩V +e−∥x−y∥ . +(24) +For the first sum we divide B2N into Bi’s as for item (ii) above, with i = 1, . . . , (2N)n. Notice +that, for all k ∈ N, there are less than c3kn−1 such boxes at distance k from x, for some c3 > 0 +that only depends on the dimension n. Furthermore, in each of these boxes there are at most +c2 log(2N) vertices by (ii). Hence it holds +� +y∈B2N∩V +e−∥x−y∥ ≤ +2N +� +k=0 +c3e−kkn−1 · c2 log(2N) ≤ c5 log N . 
+(25) +For the second sum in (24) we use item (i) and bound +� +y∈Bc +2N∩V +e−∥x−y∥ = +∞ +� +k=N +� +y∈Ck∩V +e−∥x−y∥ ≤ +∞ +� +k=N+1 +c1kn−1e−(k−N) ≤ c6 +(26) +for some c6 > 0. Putting (25) and (26) into (24) gives the result. +□ +Corollary 2.4 (Corollary of Proposition 2.2). Consider a compact set Q ⊂ Rn. Let Z0 ∈ NV × +NV be an initial configuration such that Z0(x, {a, g}) = 0 for all x ̸∈ Q∩V and Eω[Z0(x, a)] ≤ +M for all x ∈ Q ∩ V . Then, P ω–a.s., for every I ∈ I, K ⊂ Rn compact, U ⊆ {a, g} and +t > 0, the following limit exists: +�πt(I, K, U) := lim +N→∞ �πBN +t +(I, K, U) . +Furthermore, the following holds: +(i) The measure (�πt)t≥0 verifies equation (12), where the two sides are finite measures and +coincide on Rn. +(ii) Defining for all compact sets K ⊂ Rn, for all u ⊆ {a, g} and for all t ≥ 0 +Zt(K, u) := �πt(I, K, u) , +one has, for all T > 0, +Eω[ZT (K, {a, g})] ≤ CKMebT +(27) +where CK = � +x∈K(b−1r(x) + 1). +Proof. The existence of �πt follows immediately from Proposition 2.2, since it implies that (16) +holds P ω–a.s. for all T > 0. In particular, (16) and the fact that �πBN +T +(I, Rn, {a, g}) < ∞ +almost surely (which follows by (15)) imply that �πT(I, Rn, {a, g}) < ∞ almost surely. We let +As := limN→∞ ABN +s +for almost every realization of the process. + +14 +V. BANSAYE AND M. SALVI +For item (i), we first notice that (�πBN +t +) satisfies (14) with B = BN. P ω–a.s., for all bounded +test functions f with support on some set C ⊂ Rn and for all t ∈ [0, T] we have +� t +0 +� +i∈As−, y∈V +��f(i1, y, a) + f(i, Xi +s−, g) − f(i, Xi +s−, a) +�� N +Xi +s−,y +i +(ds) +≤ 3∥f∥∞ +� � T +0 +� +i∈As−, Xi +s−∈C, y∈V +N +Xi +s−,y +i +(ds) + +� T +0 +� +i∈As−, Xi +s−̸∈C, y∈C +N +Xi +s−,y +i +(ds) +� +≤ 3∥f∥∞�πT (I, C, {a, g}) < ∞ +(28) +since �πT (I, Rn, {a, g}) < ∞. +Similarly the number of births in C is a.s. controlled by +�πT (I, C, {a, g}): +� t +0 +� +i∈As− +��f(i1, Xi +s−, a) + f(i2, Xi +s−, a) − f(i, Xi +s−, a) +�� N b +i (ds) ≤ 3∥f∥∞�πT (I, C, {a, g}) . +(29) +Indeed, for each newborn in C there is either an active particle in C or, at least, a ghost. To +sum up, the integrals appearing on the right hand side of (12) are almost surely well defined +on Rn and finite and (i) follows from (14) by letting N go to infinity. +We turn our attention to item (ii). By Fatou’s lemma and Proposition 2.1, +Eω[ZT (K, {a, g})] ≤ lim inf +N→∞ Eω[ZBN +T +(K, {a, g})] ≤ CKMebT . +This ends the proof. +□ +2.4. Existence of the process on the infinite graph with infinitely many initial +particles. In the previous section we have shown that the process (�πt)t∈[0,T] is well defined +as soon as the initial condition involves only a finite number of particles. We want to show +the existence of (�πt)t∈[0,T] also for initial configurations where the average number of particles +on each site is bounded. +Consider an initial configuration of particles Z0 ∈ NV × NV of alive and ghost particles +such that Eω[Z0(x, a)] ≤ M. For N ∈ N the truncated configuration Z0,N is obtained by +considering only the particles in Z0 that are inside the ball BN: +Z0,N(x, ·) = Z0(x, ·)1x∈BN . +A central observation is that we have monotonicity in N of the process: take N1 < N2 and +couple the processes started in Z0,N1 and Z0,N2, call them (�πt,Nj)t≥0 for j = 1, 2. Then we +have P ω–a.s. +�πt,N1(i, x, u) ≤ �πt,N2(i, x, u) +∀i ∈ I, x ∈ V, u ∈ {a, g} . +(30) +As a consequence, we have the following proposition. +Proposition 2.5. Consider a configuration of particles Z0 ∈ NV ×NV such that Eω[Z0(x, a)] ≤ +M for all x ∈ V . 
Let (�πt,N)t≥0 be the process on the infinite graph started in Z0,N. Then, +P ω–a.s., for every I ∈ I, K ⊂ Rn compact, U ⊆ {a, g} and t ≥ 0, the following limit exists +and is finite: +�πt(I, K, U) := lim +N→∞ �πt,N(I, K, U) . + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS15 +Furthermore one has, for all T > 0, +Eω[�πT (I, K, {a, g})] ≤ CKMebT +(31) +where CK = � +x∈K(b−1r(x) + 1). +Notice that we have called the limiting process again (�πt)t≥0, since we have extended the +definition appearing in Corollary 2.4 to a larger set of initial conditions. +Proof. The existence of the limit follows by the monotonicity in (30). Fix any T > 0. We +want to show now that, P ω–a.s., �πt(I, K, {a, g}) does not explode for any compact K ⊂ Rn +and 0 ≤ t ≤ T. Keeping in mind (30), we can use monotone convergence in N to see that +Eω� +sup +t∈[0,T] +�πt(I, K, {a, g}) +� += Eω[�πT (I, K, {a, g})] += lim +N→∞ Eω[�πT,N(I, K, {a, g})] +(27) +≤ CKMebT < ∞ , +(32) +where for the first equality we have used the fact that �πt(I, K, {a, g}) is also monotone in t, +since each new event does not decrease the total number of particles in K. +□ +It follows that supt∈[0,T] �πt(I, K, {a, g}) is finite P ω–almost surely. Notice in particular that +this implies that �πt,N(I, K, U) = �πt,M(I, K, U) for all N, M large enough. If this was not the +case, we would have an infinite sequence of initial particles, coming from arbitrary far away, +whose progeny would enter K before time T, thus making �πT (I, K, {a, g}) explode. +Corollary 2.6. The following holds. +(i) For any t ≥ 0 and f : I × Rn × {a, g} → R measurable and compactly supported in the +second coordinate: +Eω� � t +0 +� +i∈As−,y∈V +��f(i1, y, a) + f(i, Xi +s−, g) − f(i, Xi +s−, a) +�� N +Xi +s−,y +i +(ds) +� +< ∞. +Eω� � t +0 +� +i∈As− +��f(i1, Xi +s−, a) + f(i2, Xi +s−, a) − f(i, Xi +s−, a) +�� N b +i (ds) +� +< ∞. +(ii) For such functions f and t ≥ 0, the following identity holds a.s. +⟨�πt, f⟩ = ⟨�π0, f⟩ + +� t +0 +� +i∈As−, y∈V +� +f(i1, y, a) + f(i, Xi +s−, g) − f(i, Xi +s−, a) +� +N +Xi +s−,y +i +(ds) ++ +� t +0 +� +i∈As− +� +f(i1, Xi +s−, a) + f(i2, Xi +s−, a) − f(i, Xi +s−, a) +� +N b +i (ds). +Proof. The first part is a consequence (28) and (29) for bounded functions f with support on +some compact set C ⊂ Rn, together with (32) which guarantees finiteness. We are left to show +that (�πt) is a solution of equation (12) on any compact set, where now the initial population +can be non bounded. By choosing N0 large, the terms involved in (12) for (�πt,N)t∈[0,T] are all +constant for N ≥ N0, which ends the proof. +□ + +16 +V. BANSAYE AND M. SALVI +Recall that πt is the projection of �πt on alive particles, i.e. for B ⊂ Rn Borel set πt(B) := +�πt(I × (B ∩ V ) × {a}). For every f : Rn → R with compact support, we get +⟨πt, f⟩ = ⟨π0, f⟩ + +� t +0 +� +i∈As−,y∈V +� +f(y) − f(Xi +s−) +� +N +Xi +s−,y +i +(ds) + +� t +0 +� +i∈As− +f(Xi +s−) N b +i (ds) . +(33) +We can now justify that the generator of this process is given by (2) and end the proof of +Theorem 1.1. More precisely, let us check that for all G compactly supported on Rn, +MG +t = ⟨πt, G⟩ − ⟨π0, G⟩ − +� t +0 +LfG(ηs) ds +is indeed a martingale, where we recall that ηt(x) = πt({x}) and for fG(η) = � +x∈V G(x)η(x) +LfG(η) = +� +x,y∈V +η(x)r(x, y) +� +G(y) − G(x) +� ++ +� +x∈V +η(x) +� +b − d +� +G(x). +(34) +The fact that ⟨πt, G⟩ is integrable is due to (31). 
The fact that Eω[ +� t +0 |LfG|(ηs)ds] is finite is +due to (28) and (29), which allows us to bound this term by Eω[�πt(I, C, {a, g})]. Besides +MG +t = ⟨�π0, f⟩ + +� t +0 +� +i∈As−, y∈V +� +f(i1, y, a) + f(i, Xi +s−, g) − f(i, Xi +s−, a) +� � +N +Xi +s−,y +i +(ds) ++ +� t +0 +� +i∈As− +� +f(i1, Xi +s−, a) + f(i2, Xi +s−, a) − f(i, Xi +s−, a) +� � +N b +i (ds), +where � +Ni and � +N b +i are the compensated Poisson point measures. Again, (28) and (29) pro- +vide the integrability condition for stochastic L1 martingale with jumps, see for example +[Ikeda and Watanabe(1989)]. Thus, MG inherits the martingale property. This ensures that +L provides the generator for functions of the form fG. +3. Input from homogenization +In this section we set the homogenization tools that are needed to prove the hydrodynamic +limit in Theorem 1.2. Notice that the results we collect are mainly inherent to the environment +ω ∈ Ω: the specific particle dynamics we are analyzing only enters in these results through +the generator of the simple random walk LN. +3.1. Assumptions for homogenization on point processes. In [Faggionato(2022a)] Fag- +gionato proves homogenization for a wide class of random walks on purely atomic measures on +Rn under some regularity assumptions for the environment, called (A1),...,(A9). Our proof of +Theorem 1.2 relies on these homogenization results. We first state these assumptions in a sim- +plified way, adapted to our context. We check then that they are indeed satisfied by our model. +Consider the Abelian group G = Rn acting on a probability space (Ω, P, F) in the following +way (see (P1), . . . , (P4) in [Faggionato(2022a)]): for g ∈ G we consider the measurable map +θg : Ω → Ω such that θ0 is the identity; θg ◦θg′ = θg+g′ for all g, g′ ∈ G; the map (g, ω) → θgω +is measurable; P ◦ θ−1 +g += P for all g ∈ G. The group G acts also on the space Rn as space- +translations (τg)g∈G such that τgx = x + g for all g ∈ G and x ∈ Rn. Suppose to have a + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS17 +random purely atomic locally finite non-negative measure µω ∈ M(Rn) +µω = +� +x∈ˆω +nx(ω)δx, +nx(ω) := µω({x}), +ˆω := {x ∈ Rn : nx(ω) > 0} . +Let P0 be the Palm measure associated to P and E0 the associated expectation (see for example +[Faggionato(2022a), equation (9)] for the precise definition or [Daley and Vere-Jones(2008)] for +a more complete account of Palm measures). Finally let r : (ω, x, y) → r(ω, x, y) ∈ [0, ∞) be +the jump rates with r(ω, x, x) = 0 for all x ∈ Rn and ω ∈ Ω, and r(ω, x, y) = 0 when x or y is +not in ˆω. Then the nine assumptions are the following, with Ω∗ some measurable, translation +invariant subset of Ω with P(Ω∗) = 1: +(A1) P is stationary and ergodic w.r.t. (θg)g∈G. That is, P ◦ θ−1 +g += P for all g ∈ G and, for +each A ⊆ Ω such that A = θgA for all g ∈ G, one has P(A) ∈ {0, 1}; +(A2) 0 < E[µω([0, 1)n)] < ∞; +(A3) for all ω ∈ Ω∗ and all g ̸= g′ it holds θgω ̸= θg′ω; +(A4) for all ω ∈ Ω∗, µω is G–stationary: for all x, y ∈ Rn and for all g ∈ G it holds +µθgω = τgµω and r(θgω, x, y) = r(ω, τgx, τgy); +(A5) for all ω ∈ Ω∗ and for all x, y ∈ ˆω it holds nx(ω)r(ω, x, y) = ny(ω)r(ω, y, x); +(A6) for all ω ∈ Ω∗ and for all x, y ∈ ˆω there exists a path x = x0, x1, . . . , xn−1, xn = y +such that r(ω, xi, xi+1) > 0 for all i = 0, . . . 
, n − 1; +(A7) E0 +� � +x∈ˆω r(ω, 0, x)|x|k� +< ∞ for k = 0, 2; +(A8) L2(P0) is separable; +(A9) setting Nz(ω) := µω(z + [0, 1)n) for z ∈ Zn, it holds E[N 2 +0 ] < ∞ and, for some C ≥ 0, +|Cov(Nz, Nz′)| ≤ C|z − z′|−1. +We prove now these assumptions are satisfied for our model. +Lemma 3.1. The complete graph G = (V, E) on a Poisson point process of parameter γ > 0 in +Rn with transition rates r(x, y) = r(y, x) = e−∥x−y∥ (with the convention r(x, x) = 0) satisfies +assumptions (A1), . . . , (A9) of [Faggionato(2022a)]. +Proof of Lemma 3.1. In our case µω is the point measure associated to the Poisson point +process, so that we have almost surely nx(ω) = 1 for each point x ∈ V . We also notice that +ˆω coincides with V . (A1), (A2) and (A3) clearly hold. (A4) and (A5) also come from the +stationarity of the Poisson point process and from our choice of the rates. (A6) is trivial +since we are considering the complete graph. For (A7) and (A8), we mention that the Palm +measure associated to the underlying Poisson point process can be obtained by just adding an +additional point to the configuration at the origin. (A7) is easy to verify, while for (A8) see +the comment at the end of Section 2.4 in [Faggionato(2022a)]. Finally, Nz is just the number +of points in the box z + [0, 1)n, so that E[N 2 +0 ] = γ2 + γ < ∞ and the covariance appearing in +(A9) is just equal to 0. +□ +3.2. The Poisson equation. Fix ω ∈ Ω. Recall from (4) that LN is the generator of the +diffusively rescaled random walk on V/N := {x/N : x ∈ V (ω)} with transition rates N 2r(·, ·). +We think of LN as acting on functions in L2(µN), where µN = µN(ω) is the uniform measure +on V/N, that is +µN := N −n � +x∈V +δx/N . + +18 +V. BANSAYE AND M. SALVI +We write (·, ·)µN and ∥ · ∥L2(µN ) for, respectively, the scalar product and the norm in L2(µN). +Note that LN is a negative-definite symmetric operator: for any f, g ∈ L2(µN) +(f, LNg)µN = (LNf, g)µN +and +(f, −LNf)µN ≥ 0 . +The following definition is justified by the fact that LN should approach in some sense the +continuous operator σ2∆. +Definition 3.2. Given λ > 0, G ∈ C∞ +c (Rn) and N ∈ N, we define Gλ +N to be the unique +element in L2(µN) such that +λGλ +N − LNGλ +N = HN +(35) +where HN is the restriction to V/N of the function H = H(λ) = λG − σ2∆G ∈ C∞ +c (Rn). +Notice that the introduction of λ > 0 is just an artifice to make λId − LN invertible, where +Id is the identity operator, and that λ will be fixed and play basically no role in what follows. +The idea for introducing Gλ +N is that LNGλ +N is more regular than LNG (for example inequality +(38) here below might fail for a general G). This regularizing procedure is associated to the so- +called corrected empirical measure in the literature, see [Gonçalves and Jara(2008)] for more +comments on this. +The next result is where homogenization theory enters the game, and in particular the +results of [Faggionato(2022a)]. +Lemma 3.3. Fix λ > 0. Then for P-a.a. ω and for each G ∈ C∞ +c (Rn) it holds +(Gλ +N, −LNGλ +N)µN ≤ c(λ, G) +(36) +∥Gλ +N∥L1(µN) , ∥Gλ +N∥L2(µN) ≤ c(λ, G) +(37) +∥LNGλ +N∥L1(µN) , ∥LNGλ +N∥L2(µN ) ≤ c(λ, G) , +(38) +where c(λ, G) > 0 is a constant not depending on N. Furthermore +lim +N→∞ ∥Gλ +N − G∥L1(µN ) = 0 +(39) +lim +N→∞ ∥Gλ +N − G∥L2(µN ) = 0 . +(40) +Proof. We follow the proof of [Faggionato(2010), Lemma 3.1]. 
By taking the scalar product +with Gλ +N in the left and right hand sides of (35) and using Cauchy-Schwarz inequality we get +λ∥Gλ +N∥2 +L2(µN ) + (−LNGλ +N, Gλ +N)µN = (HN, Gλ +N)µN ≤ ∥Gλ +N∥L2(µN)∥HN∥L2(µN ) . +Notice that Gλ +N ∈ L2(µN) and that supN∈N ∥Hλ +N∥L2(µN ) < ∞, so that we don’t have a trivial +inequality. Recalling that (−LNGλ +N, Gλ +N) ≥ 0 by negativity of the operator LN and noticing +that ∥Gλ +N∥L2(µN ) appears with a square on the l.h.s., we obtain the L2 bound in (37). As a +consequence we also get (36) and, since LNGλ +N = λGλ +N + HN, the L2 bound in (38). +For the L1 bounds we need the integral representation +Gλ +N(x/N) = +� +y∈V +� ∞ +0 +e−λtpN +t (x/N, y/N)HN(y/N) dt +x ∈ V +(41) +where pN +t (x/N, y/N) indicates the probability that the random walk on V/N with generator +LN started at x/N is in y/N at time t ≥ 0, for x, y ∈ V . Since by reversibility pN +t (x/N, y/N) = + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS19 +pN +t (y/N, x/N), +∥Gλ +N∥L1(µN ) ≤ +1 +N n +� +x,y∈V +� ∞ +0 +e−λtpN +t (y/N, x/N)|HN(y/N)| dt +(42) += 1 +λ∥HN∥L1(µN) +N→∞ +−−−−→ 1 +λ∥H∥L1(Rn) < ∞, +(43) +where H = λG− σ2∆G. This proves the first inequality of (37), and the L1 bound for LNGλ +N +in (38) follows as before since supN∈N ∥Hλ +N∥L1(µN ) < ∞. +We move to the homogenization results (39) and (40). By Lemma 3.1 we can apply the +results of [Faggionato(2022a)]. +By [Faggionato(2022a), Theorem 1] we know that Gλ +N → +G in the strong sense described in [Faggionato(2022a), Definition 3.11]. As pointed out in +[Faggionato(2022a)], see discussion after formula (167) therein, if condition (A9) is fulfilled as +in our case, then one also has the L2 convergence in (40), since G is compactly supported. +Finally, we turn our attention to (39). For ℓ > 0, using Cauchy Schwarz inequality, the +quantity ∥Gλ +N − G∥L1(µN ) can be upper bounded by +∥Gλ +N(·)1{∥ · ∥>ℓ}∥L1(µN ) + ∥G(·)1{∥ · ∥>ℓ}∥L1(µN) + #{x ∈ BNℓ ∩ V }1/2 +N n/2 +∥Gλ +N − G∥L2(µN) . +The second term is null for ℓ large enough, since G has compact support. The third term goes +to 0 as N → ∞ since the fraction is P–a.s. converging to a constant while ∥Gλ +N − G∥L2(µN) +goes to 0 by (40). It remains to show that +lim sup +ℓ→∞ +lim sup +N→∞ +∥Gλ +N(·)1{∥ · ∥>ℓ}∥L1(µN) = 0 . +(44) +Since H ∈ C∞ +c (Rn), we can find two non-negative functions H+, H− ∈ C∞ +c (Rn) such that +H− ≤ H ≤ H+ and so, for their restrictions H− +N, H+ +N to VN, it holds H− +N ≤ HN ≤ H+ +N. Call +f λ +N, F λ +N the solutions in L2(µN) of the equations +λf λ +N − LNf λ +N = H− +N +λF λ +N − LNF λ +N = H+ +N . +From (41) we derive that f λ +N, F λ +N are also non-negative and that −f λ +N ≤ Gλ +N ≤ F λ +N on VN. +In particular, in order to prove (44) we can just prove the same equation with f λ +N and F λ +N +instead of Gλ +N. Therefore, without loss of generality, we can just assume that Gλ +N and H are +non-negative, the same proof working for Gλ +N and H non-positive. By (35) and an integral +representation as in (41) we see that in this case the function G such that H = λG − σ2∆G +is non-negative, too. Cauchy Schwarz inequality yields +∥Gλ +N(·)1{∥ · ∥>ℓ}∥L1(µN) = ∥Gλ +N(·)∥L1(µN ) − ∥Gλ +N(·)1{∥ · ∥≤ℓ}∥L1(µN ) +≤ ∥Gλ +N(·)∥L1(µN) − ∥G(·)1{∥ · ∥≤ℓ}∥L1(µN ) + ∥(Gλ +N(·) − G(·))1{∥ · ∥≤ℓ}∥L1(µN ) +≤ ∥Gλ +N(·)∥L1(µN) − ∥G(·)1{∥ · ∥≤ℓ}∥L1(µN ) + #{x ∈ BNℓ ∩ V }1/2 +N d/2 +∥Gλ +N − G∥L2(µN ) . +The third summand goes to 0 as N → ∞ as seen before. 
To handle the first summand, we +notice that, since Gλ +N and H are non-negative, the inequality in (42) is in fact an equality. +Hence +lim sup +N→∞ +∥Gλ +N(·)1{∥ · ∥>ℓ}∥L1(µN ) = ∥G(·)1{∥ · ∥>ℓ}∥L1(µN ), +which is null for ℓ large enough, since G has compact support. +□ + +20 +V. BANSAYE AND M. SALVI +As a result of having to deal with a non-conservative system, in order to study the hydro- +dynamic limits we will also have to control the L2(µN) norm of LNG. +Lemma 3.4. Let G ∈ C∞ +c (Rn) and n ≥ 2. Then, P–a.s., +lim +N→∞ N −n∥LNG∥L2(µN) = 0 . +Proof. First of all we bound the second moment of ∥LNG∥L2(µN). Call SG the support of G +and indicate with NSG the support blown by a factor N. We have +E +� +∥LNG∥2 +L2(µN ) +� += E +� +N −n � +x∈V +� � +y∈V +N 2r(x, y) +� +G(y/N) − G(x/N) +��2� +(45) +≤ 2N 4−n� +(A) + (B) + (C) +� +, +(46) +where +(A) = E +� +� +x∈NSG∩V +� +� +y∈BR(x)∩V +r(x, y) +� +G(y/N) − G(x/N) +��2� +(B) = E +� +� +x∈NSG∩V +� +� +y∈Bc +R(x)∩V +r(x, y) +� +G(y/N) − G(x/N) +��2� +(C) = E +� +� +x∈(NSG)c∩V +� +� +y∈NSG∩V +r(x, y)G(y/N) +�2� +, +where BR(x) is a ball around x of radius R = log N n. We proceed by estimating separately +the three parts. We can easily deal with part (B) thanks to Slivnyak-Mecke theorem (see +[Moller and Waagepetersen(2003), Theorem 13.3] or [Daley and Vere-Jones(2008), Chapter +13] for more general versions of the theorem), which yields +(B) ≤ ∥G∥2 +∞ +� +x∈NSG +� +y /∈BR(x) +� +r(x, y)2 + r(x, y) +� +z /∈BR(x) +r(x, z) dz +� +dy dx ≤ cN ne−R , (47) +where the factor N n comes from the size of NSG and the factor e−R comes from the internal +integrals. +Developing the square and using again Slivnyak-Mecke theorem, term (C) becomes +(C) = +� +x/∈NSG +� +y∈NSG +r(x, y)2G(y/N)2 dy dx ++ +� +x/∈NSG +� +y∈NSG +� +z∈NSG +r(x, y)r(x, z)G(y/N)G(z/N) dy dz dx. +(48) +Since G ∈ C∞ +c (Rn), G must be Lipshitz with Lipshitz constant, say, K > 0. Call d(x, A) +the distance between x ∈ Rn and the border of the set A ⊂ Rn. Noticing that ∥x − y∥ ≥ +d(x, NSG) + d(y, NSG) if x ̸∈ NSG and y ∈ SG, we see that the first double integral on the +r.h.s. of (48) is smaller than +� +x/∈NSG +� +y∈NSG +e−2(d(x,NSG)+d(y,NSG))� +K d(y, NSG) +N +�2 +dy dx +≤ c1N −2 +� +x/∈NSG +e−2d(x,NSG)N n−1 dx ≤ c2N d−3 + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS21 +with c1, c2 > 0 constants that depend on G. Regarding the triple integral on the r.h.s. of (48) +we can do something similar and bound it by +� +x/∈NSG +e−2d(x,NSG)� � +y∈NSG +e−2d(y,NSG)� +K d(y, NSG) +N +� +dy +�2 +dx ≤ cN 2n−4, +with c > 0 a constant depending on G. Plugging these two last bounds back into (48) we got +(C) ≤ cN 2n−4. +(49) +Finally we turn our attention to (A). We use once more Slivnyak-Mecke theorem and a +first order Taylor approximation and obtain +(A) = +� +x∈NSG +E +�� +� +y∈BR(x)∩V +r(x, y) +� +� +i=1,...,n +yi − xi +N +d +dxj G(x/N) + O +� +∥x − y∥2/N 2���2� +≤ c1N −2∥∇G∥2 +∞ +� +x∈NSG +E +�� +� +y∈BR(x)∩V +r(x, y) +� +∥x − y∥ + ∥x − y∥2/N +��2� +dx +≤ c2N −2∥∇G∥2 +∞ +� +x∈NSG +U(x, R) dx +≤ c3N n−2 +(50) +where we have used the fact that +U(x, R) := +� +y∈BR(x) +� +r(x, y)2∥x − y∥2 + +� +z∈BR(x) +r(x, y)r(x, z)∥x − y∥ ∥x − z∥ dz +� +dy ≤ c +for some c > 0. +We finally put (47), (49) and (50) back into (46) to obtain that +E[∥LNG∥2 +L2(µN)] ≤ cN n . +By Markov inequality we obtain now that, for all ε > 0, +P(N −n∥LNG∥L2(µN) > ε) ≤ c ε−2N −n , +which tells us that the sequence N −n∥LNG∥L2(µN) converges almost completely to 0 for n ≥ 2 +and hence almost surely. +□ +4. 
A non-conservative Kipnis–Varadhan estimate +Recall the Domination & Convergence Assumption and in particular (6). For constants +ρ > 0 and M ∈ N0, call νM,ρ(·) = νM,ρ(ω, ·) the measure that dominates all initial conditions. +That is, νM,ρ is the product measure on NV such that its restriction on each site x ∈ V is a +Poisson random variable of parameter ρ plus the constant M ∈ N: +νM,ρ +� � +x∈A +[M + nx, ∞) +� += +� +x∈A +� +∞ +� +j=nx +ρje−ρ +j! +� +∀A ⊂ V, (nx)x∈A ∈ N|A| . +(51) +Lemma 4.1. Consider an initial condition given by ν0,ρ, the product of Poisson random +variables of parameter ρ > 0. Under P ω +ν0,ρ, let each particle perform an independent random +walk on V/N with generator LN (without births nor deaths) and call (Yt)t≥0 the evolution of +their configuration, so that Yt(x) is the number of particles in x ∈ V at time t. Let H be a + +22 +V. BANSAYE AND M. SALVI +nonnegative function on V/N belonging to L1(µN) ∩ L2(µN) and such that LNH belongs to +L2(µN). Then for any T, A > 0 it holds +P ω +ν0,ρ +� +sup +0≤t≤T +1 +N n +� +x∈V +Yt( x +N )H( x +N ) > A +� +≤ c(ρ, T)A−1|||H|||N +(52) +P ω +ν0,ρ +� +sup +0≤t≤T +1 +N n +� +x∈V +Yt( x +N )2H( x +N ) > A +� +≤ ˜c(ρ, T)A−1 +� +|||H|||2 +N + N 2−2n � +x∈V +r(x)H( x +N ) (53) +with c(ρ, T) = (ρ2 + ρ + Tρ)1/2, ˜c(ρ, T)2 a polynomial in ρ and T and +|||H|||2 +N := ∥H∥2 +L1(µN) + N −n∥H∥L2(µN ) ∥LNH∥L2(µN ) . +(54) +Remark 4.2. This sort of inequalities are typically carried out for all powers of the number +of particles Y k +t +at once, at the only cost of a constant on the r.h.s. varying with k, see for +example [Faggionato(2010), Lemma 3.2]. In our setting, though, we cannot hope for such a +“clean” result for all values of k, due to the irregularity of the support V = V (ω). In the rest +of the paper we only need k = 1, but we bound here also the case k = 2 for future interest. +Proof of Lemma 4.1. The particle dynamics without births or deaths is reversible with re- +spect to ν0,ρ. Hence, by Kipnis-Varadhan inequality ([Kipnis and Varadhan(1986)], see also +[Kipnis and Landim(1998), Theorem 11.1 in Appendix 1]) we know that, for k ≥ 1, +P ω +ν0,ρ +� +sup +0≤t≤T +1 +N n +� +x∈V +Yt( x +N )kH( x +N ) > A +� +≤ e +A +� +⟨g, g⟩ν0,ρ + T ⟨g, −N 2L∗g⟩ν0,ρ +(55) +where N 2L∗ is the generator of (Yt)t≥0 and g : NV → R is given by +g(η) := +1 +N n +� +x∈V +gx(η)H(x/N), +gx(η) := η(x)k. +Notice that L∗ corresponds to L appearing in (2) with b = d = 0. Now we calculate +⟨g, g⟩ν0,ρ = +1 +N 2n +� +x,y∈V +H(x/N)H(y/N)ν0,ρ[gxgy] ≤ c0(ρ, k)∥H∥2 +L1(µN ) +(56) +where c0(ρ, k) = E[ξ2k +ρ ] indicates the 2k-th moment of ξρ ∼Poisson(ρ). Moving to the second +summand under the root in (55), we write +⟨g, −N 2L∗g⟩ν0,ρ = −N 2−2n � +x,y∈V +H(x/N)H(y/N)ν0,ρ[gx L∗gy] . +(57) +Besides, we have +L∗gy(η) = η(y)r(y) +� +(η(y) − 1)k − η(y)k� ++ +� +z∈V +η(z)r(z, y) +� +(η(y) + 1)k − η(y)k� +. +For x = y, we get +ν0,ρ +� +gx L∗gx +� += c1(ρ, k)r(x) +with +c1(ρ, k) = E +� +ξk+1 +ρ +((ξρ − 1)k − ξk +ρ) +� ++ E[ξρ]E +� +ξk +ρ((ξρ + 1)k − ξk +ρ) +� +≤ 0 . + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS23 +For x ̸= y, using that L∗gy(η) − η(x)r(x, y) +� +(η(y) + 1)k − η(y)k) is independent of η(x) under +ν0,ρ and that ν0,ρ[L∗f] = 0 for all f, +ν0,ρ +� +gx L∗gy +� += ν0,ρ +� +ηk(x) · η(x)r(x, y) +� +(η(y) + 1)k − η(y)k�� ++ ν0,ρ[ηk]ν0,ρ +� +L∗(gy) − η(x)r(x, y) +� +(η(y) + 1)k − η(y)k�� += c2(ρ, k)r(x, y) . +with +c2(ρ, k) = +� +E[ξk+1 +ρ +] − E[ξk +ρ]E[ξρ] +� +E +� +(ξρ + 1)k − ξk +ρ +� +. 
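The constants c1(ρ, k) and c2(ρ, k) above involve only low moments of a Poisson random variable and can be checked symbolically. The following is a small sketch assuming sympy is available; the raw moments of ξρ are computed from the moment generating function.

```python
import sympy as sp

rho, t, xi = sp.symbols("rho t xi", positive=True)

def moment(m):
    # m-th raw moment of xi ~ Poisson(rho), from the mgf exp(rho*(e^t - 1))
    mgf = sp.exp(rho * (sp.exp(t) - 1))
    return sp.expand(sp.diff(mgf, t, m).subs(t, 0))

def E(poly):
    # expectation of a polynomial in xi, computed term by term
    p = sp.Poly(sp.expand(poly), xi)
    return sp.expand(sum(c * moment(m) for (m,), c in p.terms()))

for k in (1, 2):
    c1 = E(xi**(k + 1) * ((xi - 1)**k - xi**k)) + E(xi) * E(xi**k * ((xi + 1)**k - xi**k))
    c2 = (E(xi**(k + 1)) - E(xi**k) * E(xi)) * E((xi + 1)**k - xi**k)
    print(k, sp.factor(c1), sp.expand(c2))
# k = 1: c1 = -rho and c2 = rho, so the two contributions cancel;
# k = 2: c1 = -rho*(4*rho**2 + 8*rho + 1) and c2 = 4*rho**3 + 4*rho**2 + rho.
```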
+When k = 1 we magically have c2(ρ, 1) = −c1(ρ, 1) = ρ, so that +⟨g, −L∗g⟩ν0,ρ = ρN 2−2n� � +x∈V +r(x)H(x/N)2 − +� +x̸=y∈V +r(x, y)H(x/N)H(y/N) +� += ρN −2n � +x∈V +H(x/N) +� +− N 2 � +y̸=x +r(x, y) +� +H(y/N) − H(x/N) +�� += ρN −n⟨H, LNH⟩µN +≤ ρN −n∥H∥L2(µN ) ∥LNH∥L2(µN ) . +Putting this and (56) back into (55) together with the fact that c0(ρ, 1) = E[ξ2 +ρ] = ρ2 +ρ gives +(52). +When k = 2, explicit calculation yield c1(ρ, 2) = −ρ(4ρ2+8ρ+1) and c2(ρ, 2) = 4ρ3+4ρ2+ρ. +They do not cancel out as in the case k = 1 and as a consequence we have another term +appearing from the term ⟨g, −L∗g⟩ν0,ρ, that is +⟨g, −L∗g⟩ν0,ρ ≤ c2(ρ, k)N −n∥H∥L2(µN ) ∥LNH∥L2(µN ) + +��c1(ρ, k) + c2(ρ, k) +�� R +with +R = N 2−2n � +x∈V +r(x)H(x/N)2. +Putting the pieces together as before we obtain (53). +□ +Let us turn to the non-conservative case. +Lemma 4.3. Let ηN +0 +be an initial distribution of particles whose law is dominated by νM,ρ +for some M ∈ N0 and ρ ≥ 0 in the sense of (6). Let H be a nonnegative function on V/N +belonging to L1(µN) and L2(µN). Then there exist a constant c1 = c(M, ρ, T) > 0 and an +absolute constant c2 > 0 such that +P ω� +sup +0≤t≤T +1 +N n +� +x∈V +ηN +t (x)H(x/N) > A +� +≤ A−1c1ec2bT |||H|||N +(58) +for all A > 0, where |||H|||N is defined in (54). +Proof. The probability appearing in (58) can be clearly upper bounded by the probability of +the same event starting with a configuration sampled with νM,ρ. As a first step, we would like +to further bound the initial condition in order to have a pure product of a Poisson number of +particles per site, which will allow us to use the result of Lemma 4.1 in the following. To this +end we first focus on the case M = 1, ρ = 0. In this case we have that the initial condition ν1,0 + +24 +V. BANSAYE AND M. SALVI +is given by a single particle on each site of V . Take two random variables X, Y ∼ Poisson(log 2) +such that +P(X + Y ≥ 1) = 1 . +We can dominate ν1,0 by the random initial condition ¯ν given by the following: the num- +ber of particles on site x ∈ V is given by X(x) + Y (x), with X(x) ∼ X and Y (x) ∼ Y +and (X(x), Y (x))x∈V independent for different x ∈ V . +Now we notice that if we want +N −n � +x∈V ηN +t (x)H( x +N ) to be greater than A, it must be that N −n � +x∈V ηN,X +t +(x)H( x +N ) is +larger than A/2, with ηN,X +t +are the particles descending from initial particles “of type X”, or +N −n � +x∈V ηN,Y +t +(x)H( x +N ) has to be greater than A/2. So with a union bound we get +P ω +ν1,0 +� +sup +0≤t≤T +1 +N n +� +x∈V +ηN +t (x)H( x +N ) > A +� +≤ 2P ω +ν0,log 2 +� +sup +0≤t≤T +1 +N n +� +x∈V +ηN +t (x)H( x +N ) > A/2 +� +. +It is straightforward to generalize the previous argument to the case M ≥ 1 and ρ ≥ 0 which +yields +P ω +νM,ρ +� +sup +0≤t≤T +1 +N n +� +x∈V +ηN +t (x)H( x +N ) > A +� +≤ (M + 1)P ω +ν0,ρ∨log 2 +� +sup +0≤t≤T +1 +N n +� +x∈V +ηN +t (x)H( x +N ) > +A +M+1 +� +. +From this we see that, at the cost of a constant factor depending on M, we can prove (58) +with initial particle configuration ν0,ρ, where we have replaced the original ρ with ρ ∨ log 2. +We use a new labelling notation for the particles, not to be confused with the one appearing +in Section 2.1. The individuals at time 0 are labelled by N. To label their descendants, we +introduce the binary tree +J = ∪k∈N0{1, 2}k. +For j = (j1, . . . , jk) ∈ J , k ∈ N0 and n ∈ N, we write (n, j) = (n, j1, . . . , jk). In particular, +(n, j) = (n, j1, ..., jk) is an individual of generation |j| = k. 
When a particle (n, j) ∈ N×{1, 2}k +reproduces, it disappears and generates particles (n, j1, ..., jk, 1) and (n, j1, ..., jk, 2). For a +subset A = I × J with I ⊂ N and J ⊂ J , we write (ηN,A +t +) for the process restricted to the +subset of particles labelled by elements of A, that is, at time t we look at ηN +t +and ignore all +the particles with labels not belonging to A. Since +ηN +t = +� +j∈J +ηN,N×{j} +t +we have +ΣN +T := sup +0≤t≤T +1 +N n +� +x∈V +ηN +t (x)H(x/N) ≤ +� +j∈J +ΣN,j +T +where +ΣN,j +T +:= sup +0≤t≤T +1 +N n +� +x∈V +ηN,N×{j} +t +(x)H(x/N) . +Using that � +j∈J 4−|j| = � +k≥0 2k4−k = 2, we can bound +P ω +ν0,ρ(ΣN +T ≥ A) ≤ P ω +ν0,ρ +� +∪j∈J {ΣN,j +T +≥ 4−|j|A/2} +� +≤ +� +j∈J +P ω +ν0,ρ +� +ΣN,j +T +≥ 4−|j|A/2 +� +. +(59) + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS25 +The key point is now to see that the process (ηN,N×{j} +t +) can be dominated by another +process (Y N,j +t +), obtained by a percolation procedure on the initial distribution of particles. +More precisely, Y N,j +0 +is obtained from ηN +0 as follows: for ℓ ∈ N, the particle with label ℓ in ηN +0 +is kept in Y N,j +0 +only if particle (ℓ, j) is born before time T for the process (ηN +t ). Notice that +this happens with probability +pj = P(Poisson(bT) ≥ |j|) = e−bT � +k≥|j| +(bT)k +k! +(60) +since, along each lineage, birth events follow a Poisson process with intensity b. If present in +Y N,j +0 +, then, particle ℓ evolves in the process (Y N,j +t +) by following the trajectory of (ℓ, j) and +its ancestors in (ηN +0 ); once (ℓ, j) has disappeared in (ηN +0 ), the particle continues to evolve +following the trajectory of any lineage of descendants of (ℓ, j). From this coupling, it is clear +that, for all t ∈ [0, T] and j ∈ J , +ηN,N×{j} +t +≤ Y N,j +t +and we have obtained +P ω +ν0,ρ +� +ΣN,j +T +≥ 4−|j|A/2 +� +≤ P ω +ν0,ρ +� +sup +0≤t≤T +1 +N n +� +x∈V +Y N,j +t +(x)H(x/N) > 4−|j|A/2 +� +. +At this point, we can use Lemma 4.1 for the process ( ˜Y N,j +t +)t∈[0,T] on V/N, with ˜Y N,j +t +(x/N) = +Y N,j +t +(x) for all x ∈ V and t ∈ [0, T], since for this process the present particles just perform +independent random walks on V/N generated by LN. We notice furthermore that the initial +particles of process (Y N,j +t +) have distribution ν0,ρpj, cfr. (60). Hence +P ω +ν0,ρ +� +sup +0≤t≤T +1 +N n +� +x∈VN +Y N,j +t +(x)H(x/N) > 4−|j|A/2 +� +≤ 4|j|2A−1c(ρpj, T)|||H|||N +where the function c(·, ·) is the same appearing in Lemma 4.1. Going back to (59) we have +obtained +P ω +ν0,ρ(ΣN +T ≥ A) ≤ +� +j∈J +4|j|2A−1c(ρpj, T)|||H|||N ≤ ¯c(ρ, T)A−1 � +j∈J +4|j|p1/2 +j +. +(61) +Recall that if X ∼ Poisson(λ) one has the bound P(X ≥ t) ≤ e−λ(eλ/t)t for all t > λ (see +for example [Vershynin(2018), Exercise 2.3.3]). Using also that the number of j’s of length ℓ +is 2ℓ, we set ¯ℓ = ⌈81ebT⌉ and compute +� +j∈J +4|j|p1/2 +j +≤ +¯ℓ +� +ℓ=0 +8ℓ + +∞ +� +ℓ=¯ℓ+1 +8ℓ�ebT +ℓ +�ℓ/2 +≤ cec2bT +with c, c2 > 0 absolute constants, which together with (61) yields the result of the lemma. +□ +5. Proof of Theorem 1.2 +As in Section 2, through the whole section we fix some realization of the underlying graph +ω ∈ Ω sampled according to measure P. All the processes in what follows will evolve under +measure P ω, and all the claims have to be intended to be true P–almost surely. + +26 +V. BANSAYE AND M. SALVI +5.1. An L2 martingale. In this section we will pave the way for the proof of tightness of the +sequence of process +� +(⟨πN +t , G⟩)t∈[0,T] +� +N and identification of the limit. 
For G ∈ C∞ +c (Rn) let us +define the process +MN +t += MN +t (Gλ +N) := ⟨πN +t , Gλ +N⟩ − ⟨πN +0 , Gλ +N⟩ − +� t +0 +⟨πN +s , LNGλ +N + bGλ +N⟩ds . +(62) +By Lemma 3.3 and Lemma 4.3, we know that MN is almost surely well defined when starting +from some ηN +0 satisfying (6). We aim at proving the following result: +Lemma 5.1. Consider a sequence of initial configurations (ηN +0 )N∈N satisfying the Domination +& Convergence Assumption. For all ε > 0 and for all G ∈ C∞ +c (Rn) it holds +lim +N→∞ P ω� +sup +0≤t≤T +��MN +t +�� ≥ ε +� += 0 . +In fact, we will not only show Lemma 5.1, but also that MN is a square integrable martingale +which converges in L2 to 0 and obtain a speed of convergence, see next Lemma 5.2. To do so, +we will use a truncation argument and exploit the results already obtained in Section 2.3 while +constructing the process. More precisely, for any a ∈ N, consider the process (πN,a +t +)t∈[0,T] (and +the corresponding (ηN,a +t +)t∈[0,T]) where the initial configuration of particles is truncated outside +the box [−a, a]n, that is, only the particles in the finite set V ∩ [−a, a]n are retained for the +initial configuration and all the others are deleted. By (16), we know that all the particles of +the process πN,a a.s. remain in a finite box during time interval [0, T] (recall that �πBN +t +was the +process restricted to a box of size BN and that for finite initial conditions πN +t was obtained as +the restriction to the second coordinate of the limit for N → ∞ of �πBN +t +, cfr. Corollary 2.4 and +(13)). It follows that the number of births and of jumps is a.s. finite in a finite time interval +and therefore the following equation holds for any locally bounded function H: +⟨πN,a +t +, H⟩ = ⟨πN,a +0 +, H⟩ + 1 +N n +� t +0 +� +R+ +� +x,y∈V +1{u≤ηN,a +s +(x)N2r(x,y)} +� +H(y/N) − H(x/N) +� +N x,y(ds, du) ++ 1 +N n +� t +0 +� +R+ +� +x∈V +1{u≤bηN,a +s +(x)} H(x/N) Qx(ds, du) . +(63) +Notice that we have adopted here a slightly different description of the process for convenience. +The underlying Poisson point processes are indexed by sites and not by individuals as before. +That is, measures N x,y and Qx with intensity ds du on R2 ++ are replacing the previous N Xi +s,y +i +and N b +i . Equation (63) can be rewritten as +⟨πN,a +t +, H⟩ = ⟨πN,a +0 +, H⟩ + +� t +0 +⟨πN,a +s +, LNH + bH⟩ ds + MN,a +t +(H), +(64) +where MN,a(H) is defined by +MN,a +t +(H) = 1 +N n +� t +0 +� +R+ +� +x,y∈V +1{u≤ηN,a +s +(x)N2r(x,y)} +� +H(y/N) − H(x/N) +� � +N x,y(ds, du) ++ 1 +N n +� t +0 +� +R+ +� +x∈V +1{u≤bηN,a +s +(x)} H(x/N) � +Qx(ds, du), +and � +N x,y and �Qx are the compensated measures of N x,y and Qx. + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS27 +We turn our attention to H = Gλ +N. On the one hand, ⟨πN,a +t +, Gλ +N⟩ increases a.s. as a → ∞ +to ⟨πN +t , Gλ +N⟩, which is a.s. finite (using for example (58) and Lemma 3.3). On the other hand, +the fact that LNGλ +N = λGλ +N − HN (cfr. (35)) and (58) ensure that +� t +0 +⟨πN +s , |LNGλ +N| + bGλ +N⟩ ds < ∞ +a.s. +and it follows by bounded convergence that a.s. +lim +a→∞ +� t +0 +⟨πN,a +s +, LNGλ +N + bGλ +N⟩ ds = +� t +0 +⟨πN +s , LNGλ +N + bGλ +N⟩ ds . +We obtain from (64) that for any t ≥ 0, MN,a +t += MN,a +t +(Gλ +N) converges a.s. as a → ∞ to MN +t , +which is given by (74) and is a.s. finite. +To wrap up, we have defined a càdlàg process (MN +t )t∈[0,T] satisfying identity (74) and such +that, for any t, MN +t +is the a.s. 
limit of MN,a +t +, defined as an integral against compensated jump +measures. Let us check now that these processes are also square integrable martingale and +that they tend to 0 in L2 and probability as N → ∞. This in particular implies Lemma 5.1. +Lemma 5.2. For any N ≥ 1 and a > 0, MN,a and MN are càdlàg square integrable martin- +gales and, for any T > 0, +Eω� +sup +t≤T +� +MN,a +t +− MN +t +�2� a→∞ +−→ 0 . +Furthermore, for any a > 0 and N ≥ 1, +Eω� +sup +t≤T +(MN,a +t +)2� ++ Eω� +sup +t≤T +(MN +t )2� +≤ CT +N n +(65) +for some constant CT which only depends on T. +Proof. We first prove that MN,a = MN,a(Gλ +N) is a square integrable martingale. Its quadratic +variation is +⟨MN,a⟩t = N 2 +N 2n +� t +0 +� +x,y∈V +ηN,a +s +(x)r(x, y) +� +Gλ +N(y/N) − Gλ +N(x/N) +�2 +ds ++ +1 +N 2n +� t +0 +� +x∈V +bηN,a +s +(x)Gλ +N(x/N)2 ds . +Since Eω[ηN,a +s +(x)] ≤ Eω[ηN +s (x)] ≤ Cebs (cfr. equation (31) and recall that ηs is the projection +on alive a–particles for Zs) we get +Eω � +⟨MN,a⟩t +� +≤ C′ebt� N 2 +N 2n +� +x,y∈V +r(x, y) +� +Gλ +N(y/N) − Gλ +N(x/N) +�2 ++ +1 +N 2n +� +x∈V +bGλ +N(x/N)2� +. +(66) +Rewriting +r(x, y) +� +Gλ +N(y/N) − Gλ +N(x/N) +�2 += −r(x, y)Gλ +N(x/N) +� +Gλ +N(y/N) − Gλ +N(x/N) +� +− r(y, x)Gλ +N(y/N) +� +Gλ +N(x/N) − Gλ +N(y/N) +� + +28 +V. BANSAYE AND M. SALVI +we obtain +Eω � +⟨MN,a⟩t +� +≤ C′′e2bt +N n +� +(Gλ +N, −LNGλ +N)µN + ∥Gλ +N∥2 +L2(µN ) +� +(67) +which is finite by (36) and (37). It follows that MN,a is a square integrable martingale and +using Doob’s inequality we also obtain the relative L2 bound appearing in (65). +We prove now by Cauchy criterion that MN,a converges to some right-continuous square +integrable martingale, since the space of L2 right-continuous martingales is complete (see +e.g. [Ikeda and Watanabe(1989), Lemma 2.1]). By uniqueness, this limit will then have to be +MN. Notice that (65) will automatically follow, since the L2 bound for MN can be derived +from that of MN,a taking the limit. More precisely let a < a′. Then +MN,a′ +t +− MN,a +t += 1 +N n +� t +0 +� +R+ +� +x,y∈V +1� +ηN,a +s + 0, +lim +N→∞ P ω� +sup +0≤t≤T +��⟨πN +t , Gλ +N⟩ − ⟨πN +t , G⟩ +�� ≥ ε +� += 0 . +(68) +This fact can be shown by using Lemma 4.3 to bound the probability in (68) by +ε−1c1ec2bT � +∥Gλ +N − G∥2 +L1(µN ) + N −n∥Gλ +N − G∥L2(µN )∥LN(Gλ +N − G)∥L2(µN) +and then applying Lemma 3.3 and Lemma 3.4 to see that this converges to 0 as N → ∞. +□ +Proof of Lemma 5.3. By Lemma 5.4 we will just have to prove tightness of (⟨πN +t , Gλ +N⟩)t∈[0,T]. +We will use Aldous criterion, see for example [Kipnis and Landim(1998), Section 4: Proposi- +tion 1.2 and Proposition 1.6]. Let us work with the set TN(θ) of couples (τ, h) such that τ is +a stopping time for the process (ηN +t )t∈[0,T] and h ∈ R is such that 0 ≤ h ≤ θ and τ + h ≤ T. +Using identity (74) we have +⟨πN +τ+h − πN +τ , Gλ +N⟩ = +� τ+h +τ +⟨πN +s , LNGλ +N + bGλ +N⟩ds + MN +τ,τ+h(G), +(69) +where MN +τ,τ+h(G) = MN +τ+h(G) − MN +τ (G). +Equation (3.2) allows us to write LNGλ +N = λ(Gλ +N − G) + σ2∆G, so that +���� +� τ+h +τ +⟨πN +s , LNGλ +N⟩ ds +���� ≤ h sup +0≤s≤T +⟨πN +s , λ|Gλ +N − G| + |σ2∆G|⟩. +We want to use Lemma 4.3 to this quantity and then take the limit θ ↓ 0. +Using that +∥Gλ +N −G∥L1(µN), ∥Gλ +N −G∥L2(µN ) and ∥LNGλ +N∥L2(µN ) are bounded by Lemma 3.3, using that +G is compactly supported and using also Lemma 3.4, we obtain +lim +θ↓0 +sup +(τ,h)∈TN(θ) +N≥1 +P ω +����� +� τ+h +τ +⟨πN +s , LNGλ +N⟩ ds +���� ≥ δ +� += 0 +for any δ > 0. 
Similarly we also see that +lim +θ↓0 +sup +(τ,h)∈TN(θ) +N≥1 +P ω +����� +� τ+h +τ +⟨πN +s , bGλ +N⟩ ds +���� ≥ δ +� += 0 . + +30 +V. BANSAYE AND M. SALVI +Finally we also know that the contribution of MN +τ,τ+h(G) is negligible by Lemma 5.1. All in +all equation (69) combined with these estimates yields +lim +δ↓0 lim sup +N→∞ +sup +(τ,h)∈TN(θ) +P ω � +|⟨πN +τ+h, Gλ +N⟩ − ⟨πN +τ , Gλ +N⟩| ≥ 3δ +� += 0 +for any δ > 0, which ends the proof. +□ +5.3. Proof of identification and convergence. We are finally ready to identify the limiting +points of (πN +t )t∈[0,T] as deterministic measure-valued processes with density. We will show that +this limit can be characterized as the weak solution of (9). +Lemma 5.5. For P − a.a. ω the following holds. Consider a sequence of initial configurations +(ηN +0 )N∈N satisfying the Domination & Convergence Assumption. For all ε > 0 and G ∈ C(Rn), +it holds +lim +N→∞ P ω� +sup +0≤t≤T +��� +� t +0 +⟨πN +s , Gλ +N − G⟩ ds +��� > ε +� += 0 . +Proof. As a direct consequence of Lemma 4.3 it holds +P ω� +sup +0≤t≤T +��� +� t +0 +⟨πN +s , Gλ +N − G⟩ ds +��� > ε +� +≤ ε−1Tc1ec2bT |||Gλ +N − G|||N +with |||·||| defined in (54). As in the proof of Lemma 5.4, one concludes by observing that +|||Gλ +N − G|||N goes to 0 as N → ∞. +□ +Thanks to Lemma 5.4, we can consider now a limiting value of of (πN +t )t∈[0,T], call it +(πt)t∈[0,T], in the space D([0, T], M), where M = M(Rn) is as usual endowed with the vague +topology. Noticing that (35) is equivalent to +LNGλ +N = λ(Gλ +N − G) + σ2∆G , +we can use representation (74) and put together the results of Lemma 5.1 and Lemma 5.5 and +(68) to infer that for any ε > 0 +lim +N→∞ P ω� +sup +t∈[0,T] +���⟨πN +t , G⟩ − ⟨πN +0 , G⟩ − +� t +0 +⟨πN +s , σ2∆G + bG⟩ ds +��� > ε +� += 0 . +(70) +As ⟨πN +t , G⟩ converges to ⟨πt, G⟩ along a subsequence, we get, for any G ∈ C∞ +c (Rn), +⟨πt, G⟩ = ⟨π0, G⟩ + +� t +0 +⟨πs, σ2∆G + bG⟩ ds +(71) +P–almost surely. We use now classical techniques to check that the solutions have a density +and prove uniqueness of the limiting problem. The result may be classical, even if the fact +that our domain is non bounded or our weak formulation make that we do not know the +appropriate reference. For convenience of the reader, we provide the proof. +Lemma 5.6. i) For P-a.a. ω the following holds. Consider a sequence of initial configurations +(ηN +0 )N∈N satisfying the Domination & Convergence Assumption for some ρ0. +Consider a +limiting point π of πN in the space D([0, T], M). Then π satisfies Equation (71), with all +terms well defined, for any G in +G = {G ∈ C∞(Rn) ∩ L1(dx) : σ2∆G ∈ L1(dx)}. + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS31 +ii) Equation (71) has a unique weak solution in the space cadlag positive Radon measure valued +function. Besides this solution π has a bounded density : πt(dx) = ρt(x)dx a.s., for a.e. t ≥ 0, +and ρ· ∈ L∞([0, T] × Rn, R+). +The second part of the statement ensures that the limiting processes π are deterministic. +For the proof of the existence of a density, we exploit our results coming from Kipnis Varadhan +estimates, but one may also invoke results focusing on the solution of (71). +Proof. Let us prove (i) and consider G ∈ G non-negative. Approximate G with non-negative +functions Gk ∈ C∞ +c (Rn) such that Gk(x) = G(x) for x ∈ Bk, with Bk the ball of radius +k centered in the origin, and Gk(x) ≤ G(x) for x /∈ Bk. 
+Then, as k → ∞, dominated +convergence theorem yields that ⟨πt, Gk⟩ → ⟨πt, G⟩, ⟨π0, Gk⟩ → ⟨π0, G⟩ and +� t +0⟨πs, bGk⟩ ds → +� t +0⟨πs, bG⟩ ds. It follows that also G satisfies (71), the r.h.s. being well defined and integrable +by part (i). +Let us move to (ii). Let us move to (ii). For the purpose, we consider the subspace of +signed Radon measures on Rn associated with the norm +∥µ∥TV = +sup +f∈G, ∥f∥L1(dx)≤1 +|⟨µ, f⟩| . +We also introduce the space GT of functions G : [0, T] × Rn → R that verify the following +properties: G(t, .) ∈ G for any t ∈ [0, T]; supt∈[0,T] +� +∥G(t, .)∥L1(dx) + ∥σ2∆G(t, .)∥L1(dx) +� +< +∞; s → G(s, x) is differentiable for any x ∈ Rn and sups∈[0,T] |∂sG(s, .)| ∈ L1(dx). +By +approximating G(·, ·) ∈ GT with a function which is piece-wise linear in time and proceeding +as in [Gonçalves and Jara(2008), Section 3], we observe that (71) can be extended to functions +in GT . This is the weak form of (9). +Finally, to get uniqueness, consider GT (t, x) = PT−tϕ(x) with ϕ ∈ G where Ptf(x) = +E[f(x+Bt)] is the semigroup of the Brownian motion with generator σ2∆. Then ∂sGT (s, ·) = +−σ2∆GT (s, ·) and GT ∈ GT and +⟨πt, GT (t, .)⟩ = ⟨π0, G(0, .)⟩ + +� t +0 +⟨πs, bGT (s, .)⟩ ds. +This implies, for two solutions π1 and π2 of Equation (71) with same initial values π0, +⟨π1 +T − π2 +T, ϕ⟩ = +� T +0 +⟨π1 +s − π2 +s, bGT (s, ·)⟩ ds +Adding that ∥GT (t, .)∥L1(dx) = ∥ϕ∥L1(dx), we obtain +∥π1 +T − π2 +T ∥TV ≤ b +� T +0 +∥π1 +s − π2 +s∥TV ds . +Gronwall lemma yields π1 = π2 and uniqueness of the solution (71) in the space of positive +Radon measure is proved. +Finally we prove that this solution admits a density. We can exploit the first part i) and +see this solution π as the limit of our sequence of processes. Take H ∈ C∞ +c +to be a nonnegative +function on V/N belonging to L1(µN) and L2(µN). Letting N → ∞ in equation (58), we +invoke Portemanteaux theorem and the fact that the supremum over [0, T] of ⟨ · , H⟩ is a + +32 +V. BANSAYE AND M. SALVI +continuous functional on D([0, T], M(Rn)) to obtain +P ω +ρ0 +� +sup +0≤t≤T +⟨πt, H⟩ > A +� +≤ A−1c(T)∥H∥L1(dx) +(72) +with c(T) = c1ec2bT . On the r.h.s. we have used the fact that N −n ∥LNH∥L2(µN) converges +a.s. to 0 using Lemma 3.4 and that ∥H∥L1(µN) tends a.s. to ∥H∥L1(dx). For a given ω the +process πt must be deterministic, since it verifies (71). Hence, by (72), we have obtained that +sup +0≤t≤T +⟨πt, H⟩ ≤ c(T)∥H∥L1(dx) . +for any H ∈ C∞ +c . By approximation, this identity can be extended to any non-negative H ∈ +L1(dx). As a consequence, the process πt is concentrated on measures absolutely continuous +with respect to the Lebesgue measure and we call ρt(x) its density at time t ∈ [0, T] and for +any t ∈ [0, T], ρt ≤ C(T) a.e. and ρ ∈ L∞([0, T] × Rn, R+). +It follows that also G satisfies (71), the r.h.s. being well defined and integrable by part (i). +□ +5.4. Proof of Theorem 1.2 with mortality (d > 0). We want to adapt the proof of +Theorem 1.2 to the case where particles die at rate d > 0. +As mentioned before, the construction of the process (ηt)t∈[0,T] is still valid when d > 0. 
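Before adapting the proofs, a quick sanity check of the role of the drift b − d may be useful: in a binary branching process with birth rate b and death rate d the expected population grows like e^{(b−d)t}, which is the zeroth-order term reappearing in (73)–(74) below and in the limiting equation. The Monte Carlo sketch below assumes numpy and uses arbitrary parameters.

```python
import numpy as np

rng = np.random.default_rng(2)

# Each particle branches into two at rate b and dies at rate d; the expected
# number of alive particles at time T is n0 * exp((b - d) * T).
b, d, T, n0, n_runs = 0.8, 0.3, 1.0, 50, 5_000
counts = np.empty(n_runs)
for run in range(n_runs):
    k, t = n0, 0.0
    while k > 0:
        t += rng.exponential(1.0 / (k * (b + d)))
        if t > T:
            break
        k += 1 if rng.random() < b / (b + d) else -1
    counts[run] = k if t > T else 0
print("empirical mean:", counts.mean(), "  n0*exp((b-d)*T):", n0 * np.exp((b - d) * T))
```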
By +following the proof of Section 2 one can prove that the corresponding measure-valued process +satisfies an equivalent of (33) that now reads, for G : Rn → R with compact support, +⟨πt, G⟩ = ⟨π0, G⟩ + +� t +0 +� +i∈As,y∈V +� +G(y) − G(Xi +s) +� +N Xi +s,y +i +(ds) ++ +� t +0 +� +i∈As +G(Xi +s) N b +i (ds) − +� t +0 +� +i∈As +G(Xi +s) N d +i (ds) +(73) +where (N b +i )i∈I is another collection of independent Poisson point measures on R+ with inten- +sity d dt, independent from the N x,y +i +’s and the N b +i ’s. +There is no problem in updating the technical tools presented in Section 3 and Section 4 to +the case d > 0. The homogenization results do not depend on the specific particle dynamics, +so the results of Lemma 3.3 are needed as they are. +The non-reversible Kipnis-Varadhan +estimate Lemma 4.3 holds when d > 0, too. To see that, one can couple the dynamics with +d = 0 and the one with d > 0 in a way that guarantees that the supremum in equation (58) +always decreases (for example, one can use the same Poisson processes for generating jumps +and births of the particles). +The results of Section 5.1 still hold for the process +MN +t += ⟨πN +t , Gλ +N⟩ − ⟨πN +0 , Gλ +N⟩ − +� t +0 +⟨πN +s , LNGλ +N + (b − d)Gλ +N⟩ds . +(74) +In particular, MN is a square integrable martingale and it satisfies Lemma 5.1. This can be +again achieved by the same truncation argument. Finally the proofs of tightness, identification +and convergence (corresponding to Section 5.2 and Section 5.3) for d > 0 follow those of the +case d = 0 as they are fundamentally based on the homogenization results and on the non- +reversible Kipnis-Varadhan estimates. + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS33 +6. Extension of the results and perspective +6.1. Extension to random graphs. For the sake of clarity we have made the choice to state +and prove our main results for the particle system evolving over the complete graph G = (V, E), +where V are the points of an homogeneous Poisson point process of intensity γ > 0 on Rn, +with n ≥ 2. In this section we discuss how one can consider a broader class of graphs. To this +end, we will consider graphs G = (V , E) with V ⊆ V and E ⊆ E obtained under measure P +(eventually enlarging the probability space Ω) by performing a random percolation procedure +on the edges of G. For a graph G we say that the particle system evolves on G when the particles +move on the nodes V with transition rates r(x, y) substituted by r(x, y) := r(x, y)1{{x,y}∈E}. +The next theorem is the generalization of Theorem 1.1. In this case one can perform any +percolation procedure on the bonds of the complete graph G. Notice that we include cases +where the graph becomes disconnected. +Theorem 1.1’. For P-a.a. realizations of the underlying Poisson point process, the following +holds. Let G = (V , E) be any graph such that V = V and E ⊆ E. Let η0 be a random +variable on NV such that E[η0(x)] ≤ M for all x ∈ V , for some M ∈ N. +Then, for all +T > 0, there exists a process (ηt)t∈[0,T] with initial value η0 and paths in the Skohorod space +D([0, T], NV ) and generator L given by the following. For each function fG : NV → R of the +form fG(η) = � +x∈V G(x)η(x) with G compactly supported on Rn, it holds +LfG(η) = +� +x,y∈V +η(x)r(x, y) +� +G(y) − G(x) +� ++ +� +x∈V +η(x) +� +b − d +� +G(x) . +Proof. The existence of the process on the finite graph as in Section 2.2 goes easily through. 
Also the quantitative estimates of Lemma 2.1 work with r(x) substituted by the corresponding quantity r̄(x) = Σ_{y∈V̄} r̄(x, y). For the case of a finite number of initial particles, cf. Section 2.3, we notice that Lemma 2.3 is true for the percolated graph since r̄(x) ≤ r(x). This in turn implies the equivalent of the key Proposition 2.2 and hence the equivalent of Corollary 2.4. The existence of the process with an infinite number of initial particles can then be checked by following the proof of Section 2.4. □

Generalizing the results of Theorem 1.2 is much more subtle. In this case one is not authorized to freely percolate the edges of G. Indeed, our proof crucially relies on the homogenization results discussed in Section 3.1. On the other hand, as long as the assumptions that imply homogenization are fulfilled, our machinery continues to work and we obtain the following more general version of Theorem 1.2.

Theorem 1.2'. Consider under P a random graph Ḡ = (V̄, Ē), with V̄ ⊆ V and Ē ⊆ E, verifying conditions (A1), ..., (A9) of Section 3.1. The results of Theorem 1.2 hold for the particle system evolving on Ḡ. In this case, the matrix σ∆ appearing in (9) has to be substituted by the matrix D such that, for any a ∈ R^n,

a · Da = (1/2) inf_{ψ∈L^∞(P_0)} E_0 [ Σ_{y∈V̄} r̄(0, y) ( a · y + ψ(θ_y ω) − ψ(ω) )^2 ]     (75)

where θ_y ω is the shift defined in Section 3.1.

Proof. The homogenization results of Lemma 3.3 are still valid exactly because (A1), ..., (A9) have been chosen so that they would work. Lemma 3.4 works since V̄ ⊆ V and since r̄(x, y) ≤ r(x, y) for all x, y ∈ V̄. The non-conservative Kipnis-Varadhan estimate of Lemma 4.3 carries through to the edge-percolated case since the reversibility of the process without births and deaths is maintained on Ḡ. Sections 5.1, 5.2 and 5.3 follow a general strategy that relies on the previous estimates and remains substantially identical for the percolated graph. □

Remark 6.1. It is possible, in principle, that the diffusion matrix described by (75) is degenerate.

To give more substance to Theorems 1.1' and 1.2', we now exhibit two well-known models that are obtained from the complete graph G = (V, E) via a bond-percolation procedure. For both of them we check that assumptions (A1), ..., (A9) are in fact satisfied, at least in some range of the parameters.

Long-range percolation. Long-range percolation is a well-studied random graph model, usually defined on the Z^n lattice, see for example [Berger(2002), Hutchcroft(2021)] and references therein. Extending its definition to continuous space (as for example in [Penrose(1991)]), we consider again the set of vertices V given by a Poisson point process of intensity γ > 0 and the set of all possible edges E. Under P, independently for each {x, y} ∈ E we retain the edge with probability 1 − e^{−β∥x−y∥^{−α}} and delete it otherwise, with α, β > 0 two parameters of the model. Theorem 1.1' guarantees that our particle process on this structure is always well-defined.

To study the hydrodynamic limit as in Theorem 1.2' we need some adjustments. The main problem arising from the percolation procedure, in fact, is that the resulting graph might not be connected and (A6), for example, would not hold.
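As a concrete illustration of the edge-retention rule just described, the following minimal Python sketch samples a finite window of the long-range percolation graph over a Poisson cloud. It is not part of the construction or of the proofs; the window size, the intensity and the values of α and β below are arbitrary choices made only for illustration.

```python
# Illustrative sketch (not from the paper): sampling a finite window of the
# long-range percolation graph on a Poisson cloud.  All numerical values
# (window size L, intensity gamma, parameters alpha and beta) are our own.
import itertools
import numpy as np

rng = np.random.default_rng(0)

n = 2                    # spatial dimension (the paper assumes n >= 2)
L = 10.0                 # side of the observation window [0, L]^n
gamma = 1.0              # intensity of the Poisson point process
alpha, beta = 3.0, 1.0   # long-range percolation parameters

# Vertices: a homogeneous Poisson point process restricted to [0, L]^n.
num_points = rng.poisson(gamma * L**n)
V = rng.uniform(0.0, L, size=(num_points, n))

# Edges: each pair {x, y} is retained independently with probability
# 1 - exp(-beta * ||x - y||^(-alpha)), and deleted otherwise.
edges = []
for i, j in itertools.combinations(range(num_points), 2):
    dist = np.linalg.norm(V[i] - V[j])
    if rng.random() < 1.0 - np.exp(-beta * dist ** (-alpha)):
        edges.append((i, j))

print(f"{num_points} vertices, {len(edges)} retained edges in the window")
```

Attaching i.i.d. heavy-tailed weights W_x to the vertices and replacing the retention probability by 1 − exp(−β W_x W_y ∥x−y∥^{−α}) would give the scale-free variant discussed below.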
To overcome this issue we must consider parameters α, β and γ that guarantee the existence of a unique infinite giant component for the graph with P-probability 1 (see [Penrose(1991)] for results on the existence of an infinite component in continuous long-range percolation). Then we define V̄ to be the set of vertices in the infinite component of the graph and Ē to be the set of retained edges connecting points of V̄. More precisely, to use the notation of Section 3.1, we identify ˆω with the points in the infinite component and let n_x(ω) = 1 for x ∈ ˆω and n_x(ω) = 0 otherwise. We show now that (A1), ..., (A8) hold and discuss (A9) afterwards. (A1) comes from the stationarity and ergodicity of the Poisson point process combined with the independence of the percolation procedure. Conditions (A2), (A3) and (A7) are inherited from the underlying Poisson point process. (A4) and (A5) are straightforward. (A6) is clear since we have restricted ourselves to the infinite component and every jump within it has positive probability. (A8) can also be deduced from the analogous property for the Poisson point process. The only point left to check is the decay of correlations (A9). Clearly E[N_0^2] < ∞. The fact that |Cov(N_k, N_{k'})| ≤ C_0 |k − k'|^{−1} is more delicate, since N_k might depend on the structure of the configuration of the infinite cluster in a far away box k' + [0, 1)^n. While we believe the bound to be true, we could not find a proper reference in the literature and its proof might be technically involved. One way to avoid this problem is to consider the parameters α and β in the range where all the points of V belong to the infinite component. This happens, for example, when α < n. In this case (A9) is clearly verified.

Scale-free percolation. Scale-free percolation is an inhomogeneous version of the long-range percolation model. It was originally introduced with nodes placed on the lattice Z^n in [Deijfen et al.(2013)Deijfen, van der Hofstad, and Hooghiemstra] and then also studied on a Poisson point process in [Deprez and Wüthrich(2018)] and [Dalmau and Salvi(2021)]. Under the measure P, we let V be the realization of a Poisson point process of parameter γ > 0 and assign independently to each vertex x ∈ V a random weight W_x ∈ [1, ∞) such that P(W_x > w) = w^{−(τ−1)}L(w) for some τ > 1 and with L a slowly varying function. Then, independently for each edge {x, y} ∈ E of the associated complete graph, we retain the edge in Ē with probability 1 − e^{−β W_x W_y ∥x−y∥^{−α}} for some α, β > 0 and delete it otherwise. The particle system on the resulting graph is again well defined by Theorem 1.1'.

For Theorem 1.2' to work, we restrict to values of α, β, γ and τ such that a unique infinite component V̄ exists with P-probability 1, see [Deprez and Wüthrich(2018)] for the precise range of parameters. The random graph Ḡ = (V̄, Ē) induced on the infinite component, equipped with the rates r̄(x, y), verifies (A1), ..., (A8). This can be deduced as for long-range percolation. To verify (A9) we have the same problem as before with correlations. To be on the safe side, one can take α ≤ n or α(τ − 1)/n ≤ 1, which guarantees that all the points of V are in the infinite cluster, see [Dalmau and Salvi(2021)].

6.2. Open problems and perspectives. The methods we present in our paper can be easily used for a wider class of models.
First of all, we point out that the reason for choosing a Poisson point process as a support is mainly for the sake of clarity rather than due to technical obstacles in dealing with more general settings. It should be possible to replace the Poisson point process with any sufficiently regular simple point process satisfying the nine conditions of [Faggionato(2022a)], see Section 3.1. Likewise, the transition rates r(x, y) do not have to assume the exact form e^{−∥x−y∥}: any rates decaying sufficiently fast (possibly depending on the dimension n of the space) should work.

A more challenging task would be generalizing the birth and death mechanism of the particles: the techniques used in this paper rely on the linearity of the birth rate b and of the death rate d. By a domination argument, it is possible to show that whenever one chooses a bounded birth rate, the corresponding process is well defined. The methods for proving tightness can be extended, too, but the identification of the limit becomes a more difficult matter. A motivating and realistic generalization would be to consider a death rate d = d_x(η) that depends on the number of particles present at each given site, with b constant. A classical example is the local logistic model with death rate d_x(η) = d + cη(x), where c > 0 is a competition factor. In this case, when the population becomes large on each site, we expect the limiting density ρ_t(x) to satisfy a reaction-diffusion equation of the kind ∂_t ρ = σ∆ρ + (b − d − cρ)ρ. Otherwise, if the population remains of order of magnitude 1 on each site, we expect rapid stirring (see e.g. [Durrett(1995)] or [Perrut(2000)]). In this case the limiting reaction coefficient should come from an averaging procedure of the local population size under its quasi-stationary law. This follows the spirit of the replacement lemma used for zero-range processes in statistical mechanics, see [Kipnis and Landim(1998)], which, roughly put, allows one to compare the particle density on microscopic boxes with the particle density on macroscopic ones. Another interesting extension would be to consider births (or deaths) that depend via a non-local kernel on the population size in a surrounding region. This would mimic the inclination of individuals to reproduce less in crowded communities and would give further importance to the spatial aspect of the particle system. Again we believe that rapid stirring techniques can be applied to tackle this problem.

Finally, in this paper we have worked with a scaling corresponding to a fast motion compared to the population dynamics or epidemics. In fact, the jump rates carry an additional N^2 factor, while births and deaths do not. It would be relevant for our motivations in epidemiology (ANR project Cadence) and interesting for the branching process analysis to consider less separated scales.

Acknowledgment. The authors are very grateful to Jerome Coville, Alessandra Faggionato and Chi Tran Viet for stimulating discussions and relevant suggestions.

This work was partially funded by the ANR Cadence ANR-16-CE32-0007 and the Chair “Modélisation Mathématique et Biodiversité” of VEOLIA-Ecole polytechnique-MNHN-F.X.

M.S. acknowledges the MIUR Excellence Department Project awarded to the Department of Mathematics, University of Rome “Tor Vergata”, CUP E83C18000100006, and thanks the INdAM unit GNAMPA.

References

[Andjel(1982)] E. D. Andjel. Invariant Measures for the Zero Range Process.
The Annals of Probability, 10(3): +525 – 547, 1982. doi: 10.1214/aop/1176993765. URL https://doi.org/10.1214/aop/1176993765. +[Ball and Donnelly(1995)] F. Ball and P. Donnelly. Strong approximations for epidemic models. Stochastic +processes and their applications, 55(1):1–21, 1995. +[Bansaye and Lambert(2013)] V. Bansaye and A. Lambert. New approaches of source-sink metapopulations +decoupling the roles of demography and dispersal. Theoretical Population Biology, 88:31–46, 2013. +[Bansaye and Méléard(2015)] V. Bansaye and S. Méléard. Stochastic models for structured populations. +Springer, 2015. +[Barbour and Reinert(2013)] A. Barbour and G. Reinert. Approximating the epidemic curve. Electronic Jour- +nal of Probability, 18:1–30, 2013. +[Bascompte and Sole(1996)] J. Bascompte and R. V. Sole. Habitat fragmentation and extinction thresholds in +spatially explicit models. Journal of Animal Ecology, 65(4):465–473, 1996. +[Berger(2002)] N. Berger. Transience, recurrence and critical behavior for long-range percolation. Communi- +cations in mathematical physics, 226(3):531–558, 2002. +[Daley and Vere-Jones(2008)] D. J. Daley and D. Vere-Jones. An introduction to the theory of point processes. +Vol. II. Probability and its Applications. Springer, New York, second edition, 2008. ISBN 978-0-387- +21337-8. doi: 10.1007/978-0-387-49835-5. URL https://doi.org/10.1007/978-0-387-49835-5. +[Dalmau and Salvi(2021)] J. Dalmau and M. Salvi. Scale-free percolation in continuous space: quenched degree +and clustering coefficient. Journal of Applied Probability, 58(1):106–127, 2021. doi: 10.1017/jpr.2020.76. +[Deijfen et al.(2013)Deijfen, van der Hofstad, and Hooghiemstra] M. +Deijfen, +R. +van +der +Hofstad, +and +G. Hooghiemstra. Scale-free percolation. In Annales de l’Institut Henri Poincaré, Probabilités et Statis- +tiques, volume 49, pages 817–838. Institut Henri Poincaré, 2013. +[Deprez and Wüthrich(2018)] P. Deprez and M. V. Wüthrich. Scale-free percolation in continuum space. Com- +munications in Mathematics and Statistics, Jul 2018. ISSN 2194-671X. doi: 10.1007/s40304-018-0142-0. +URL https://doi.org/10.1007/s40304-018-0142-0. +[Durrett(1995)] R. Durrett. Ten lectures on particle systems. Lectures on Probability Theory, pages 97–201, +1995. +[Faggionato(2010)] A. Faggionato. Hydrodynamic limit of zero range processes among random conductances +on the supercritical percolation cluster. Electronic Journal of Probability, 15:259–291, 2010. +[Faggionato(2022a)] A. Faggionato. Stochastic homogenization of random walks on point processes. to appear +in Annales de l’Institut Henri Poincaré, 2022a. +[Faggionato(2022b)] A. Faggionato. Hydrodynamic limit of simple exclusion processes in symmetric random +environments via duality and homogenization. Probability Theory and Related Fields, 184(3):1093–1137, +2022b. doi: 10.1007/s00440-022-01163-8. URL https://doi.org/10.1007/s00440-022-01163-8. +[Ganguly and Ramanan(2022)] A. Ganguly and K. Ramanan. Hydrodynamic limits of non-markovian inter- +acting particle systems on sparse graphs. arXiv, 2022. URL https://arxiv.org/abs/2205.01587. + +BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS37 +[Gonçalves and Jara(2008)] P. Gonçalves and M. Jara. Scaling limits for gradient systems in random environ- +ment. Journal of Statistical Physics, 131(4):691–716, 2008. +[Hastings(1983)] A. Hastings. Can spatial variation alone lead to selection for dispersal ? Theoretical Popula- +tion Biology,, 24(3):244–251, 1983. +[Hiebeler(2000)] D. Hiebeler. 
Populations on fragmented landscapes with spatially structured heterogeneities +: landscape generation and local dispersal. Ecology, 81(6):1629–1641, 2000. +[Hutchcroft(2021)] T. Hutchcroft. Power-law bounds for critical long-range percolation below the upper-critical +dimension. Probability Theory and Related Fields, 181(1):533–570, 2021. +[Ikeda and Watanabe(1989)] N. Ikeda and S. Watanabe. Stochastic differential equations and diffusion pro- +cesses. North-Holland Publishing Company, 1989. +[Jara and Landim(2008)] M. D. Jara and C. Landim. Quenched non-equilibrium central limit theorem for a +tagged particle in the exclusion process with bond disorder. Ann. Inst. Henri Poincaré Probab. Stat., 44(2): +341–361, 2008. ISSN 0246-0203. doi: 10.1214/07-AIHP112. URL https://doi.org/10.1214/07-AIHP112. +[Kipnis and Landim(1998)] C. Kipnis and C. Landim. Scaling limits of interacting particle systems, volume +320. Springer Science & Business Media, 1998. +[Kipnis and Varadhan(1986)] C. Kipnis and S. R. S. Varadhan. Central limit theorem for additive functionals +of reversible Markov processes and applications to simple exclusions. Communications in Mathematical +Physics, 104(1):1 – 19, 1986. doi: cmp/1104114929. URL https://doi.org/. +[Kurtz(1981)] T. G. Kurtz. Approximation of population processes. SIAM, 1981. +[Levins(1969)] R. Levins. Some demographic and genetic consequences of environmental heterogeneity for +biological control. Bulletin of the Entomological society of America, 15(3), 1969. +[Liggett(1973)] T. +M. +Liggett. +An +Infinite +Particle +System +with +Zero +Range +Interactions. +The +Annals +of +Probability, +1(2):240 +– +253, +1973. +doi: +10.1214/aop/1176996977. +URL +https://doi.org/10.1214/aop/1176996977. +[Meleard and Roelly(1993)] S. Meleard and S. Roelly. Sur les convergences etroite ou vague de pro- +cessus +a +valeurs +mesures[On +the +vague +and +weak +convergence +of +measure-valued +processes]. +Universität +Bielefeld. +Forschungszentrum +Bielefeld-Bochum-Stochastik +[BiBoS], +1993. +URL +https://books.google.it/books?id=SgjvtgAACAAJ. +[Moller and Waagepetersen(2003)] J. Moller and R. P. Waagepetersen. Statistical inference and simulation for +spatial point processes. Chapman and Hall/CRC, 2003. +[Montagnon(2019)] P. Montagnon. A stochastic sir model on a graph with epidemiological and population +dynamics occurring over the same time scale. Journal of mathematical biology, 79(1):31–62, 2019. +[Penrose(1991)] M. D. Penrose. On a continuum percolation model. Advances in Applied Probability, 23(3): +536–556, 1991. ISSN 00018678. URL http://www.jstor.org/stable/1427621. +[Perrut(2000)] A. Perrut. Hydrodynamic limits for a two-species reaction-diffusion process. The Annals of +Applied Probability, 10(1):163–191, 2000. +[Pulliam(1988)] H. R. Pulliam. Sources, sinks, and population regulation. American naturalist, pages 652–661, +1988. +[Qi et al.(2019)Qi, Beaunée, Arnoux, Dutta, Joly, Vergu, and Ezanno] L. Qi, G. Beaunée, S. Arnoux, B. L. +Dutta, A. Joly, E. Vergu, and P. Ezanno. Neighbourhood contacts and trade movements drive the re- +gional spread of bovine viral diarrhoea virus (bvdv). Veterinary Research, 50(1):30, 2019. doi: 10.1186/ +s13567-019-0647-x. URL https://doi.org/10.1186/s13567-019-0647-x. +[Vershynin(2018)] R. Vershynin. High-dimensional probability: An introduction with applications in data sci- +ence, volume 47. Cambridge university press, 2018. +École Polytechnique CMAP, École Polytechnique, Route de Saclay 91128 Palaiseau Cedex, +FRANCE. 
+Email address: vincent.bansaye@polytechnique.edu
+Dipartimento di Matematica, Università di Roma Tor Vergata, Via della ricerca scientifica 1, 00133, Rome, Italy
+Email address: salvi@mat.uniroma2.it
+URL: https://www.mat.uniroma2.it/~salvi/
diff --git a/DNE4T4oBgHgl3EQfGAw7/content/tmp_files/load_file.txt b/DNE4T4oBgHgl3EQfGAw7/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ca249991c36fc68467c002e5fb6cd403c0a83908
--- /dev/null
+++ b/DNE4T4oBgHgl3EQfGAw7/content/tmp_files/load_file.txt
@@ -0,0 +1,1446 @@
+filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf,len=1445
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Key words: Epidemics, branching process, random graphs, stochastic homogenization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' MSC 2020: 92D25, 05C81, 35B27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Introduction and main results Consider a graph G whose vertices V are placed according to a Poisson point process on Rn with n ≥ 2 and with edge set E drawn from some distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Attach to each unoriented edge {x, y} ∈ E a rate r(x, y) = r(y, x) = e−∥x−y∥, where ∥·∥ indicates the Euclidean distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Consider individuals that perform independent random walks on G with jump rates r(x, y).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' They also give birth to new individuals at rate b ≥ 0 and die at rate d ≥ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The main goal of the present work is to describe the limiting behaviour of this particle system under a diffusive rescaling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The motivation for studying this kind of process comes from the analysis of real-world networks with agents moving on spatially inhomogeneous structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Metapopulation mod- els (or metacommunity for several species) aim at describing the habitat of a population as a collection of patches.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Exchanges between two patches can depend on several features, in particular the distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' From the pioneering works of Levins [Levins(1969)], metapopula- tions have a long story in biology and ecology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Issues come from conservation of species (see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Bascompte and Sole(1996), Bansaye and Lambert(2013)]), evolution of dispersion (see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Hastings(1983)]), impact of fragmentation of habitats (see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Hiebeler(2000)]) and ef- fect of heterogeneity of habitats (see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Pulliam(1988)]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' While for the sake of simplicity one would consider a small number of patches, applications often ask for the study of large metapopulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' As far as we know the literature, large metapopulations are considered either in a mean field approximation (see [Levins(1969)]) or with a spatially explicit large structure using cellular automates and simulations ([Bascompte and Sole(1996)]) or in a peri- odic environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Random networks provide a relevant mathematical framework to analyze 1 2 V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' BANSAYE AND M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' SALVI models which do not fall in the mean field approximation and that would make explode the parameters’ complexity if considered as large explicit graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Rigorous works which combine motion and demography (birth, death, infections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=') on large random landscapes are rare for now.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Our interest is understanding how an epidemics would spread on such structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' As a driving example, one can consider the spread of an infection among cattle on the French network of farms, see [Qi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' (2019)Qi, Beaunée, Arnoux, Dutta, Joly, Vergu, and Ezanno].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In this first work we identify the diffusive behaviour of an epidemics in its first stages, which corresponds to the classic branching process approximation for small ratio of infected indi- viduals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In this case b represents the contamination rate and d is the recovery rate for the infected population.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' This approximation is valid on a time window where the infected popula- tion remains locally small compared to the population size, see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Ball and Donnelly(1995), Barbour and Reinert(2013), Montagnon(2019)] for the classical mixed SIR model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' From a mathematical point of view, the first challenge is represented by the unboundedness of the jump rates: on the one hand a site in V can have a huge number of close-by neighbours, so that the jump rate of a single individual can be arbitrarily large at that site;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' on the other hand there is no restriction on the number of individuals that can occupy a given site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Proving that such a process is well-posed is in itself not trivial: both classic and more recent techniques for proving existence fail to apply to our framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The second challenge is represented by the irregularity of the support V combined with the lack of reversibility of the system, due to births and deaths.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In order to study the limiting behaviour of the process, we need to gather approaches coming from statistical mechanics and mathematical biology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' To be more precise, the way we can cope with the random geometry of the underlying graph is through stochastic homogenization and in particular the results of [Faggionato(2022a)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The theory of homog- enization, first started in a deterministic context by analysts, describes how the microscopic irregularities of a medium affect the macroscopic behavior of the system.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' It is by now well understood how to use this technique to derive hydrodynamic limits for reversible particles systems, see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Gonçalves and Jara(2008), Faggionato(2022b)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Yet, to our knowledge, one fundamental requirement for obtaining these results has been the reversibility of the process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In our context we need to adapt some tools to non–reversible population models, in the vein of e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Kurtz(1981), Bansaye and Méléard(2015)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In particular, a fundamental ingredient is the extension of an inequality for the supremum of a particle process due to Kipnis and Varadhan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' This estimate is required for the proof of tightness, for the identification of the limit and to show that this limit has a density with respect to the Lebesgue measure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Model and main results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For some probability space (Ω, P, F) and ω ∈ Ω, let V = V (ω) be the points of a Poisson point process on Rn with n ≥ 2 and intensity γ > 0 under P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Let E = E(ω) = {{x, y}, x, y ∈ V } be the set of unoriented edges between the points of V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' We will consider at first the complete graph G = (V, E) as a support for our particle system, while in Section 6 we will discuss how to extend our results by generalizing G via bond percolation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Given ω and a configuration of particles η ∈ NV consider the transitions η −→ \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 ηx,y with rate η(x)r(x, y) ηx,+ with rate b η(x) ηx,− with rate d η(x) (1) where ηx,y = η − 1x + 1y is the configuration obtained from η by subtracting one particle in x ∈ V and adding one in y ∈ V , ηx,+ = η + 1x adds one particle in x ∈ V and ηx,− = η − 1x BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS 3 has one particle less in x ∈ V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The positive numbers r(x, y) = r(y, x) = e−∥x−y∥ are the jump rates for each particle to go from point x ∈ V to point y ∈ V , and vice-versa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For simplicity we set r(x, x) = 0 for all x ∈ V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' We let r(x) := � y∈V : {x,y}∈E r(x, y) be the total jump rate of a particle at site x ∈ V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' It is not hard to show that, P–almost surely, r(x) is finite for every x ∈ V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The parameters b, d ≥ 0 are the individual rate of birth and death of the particles, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For a given realization G(ω) of the graph, we introduce a probability space with measure P ω under which we will construct our particle process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Eω indicates the associated expectation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Let η0 be the initial configuration of particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Our first result establishes that, for P–almost every ω, there exists a Markov process with jump rates given by (1) as soon as η0 has uniformly bounded expectation on each site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For P-a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' ω ∈ Ω the following holds.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Let η0 be a random variable on NV such that, for some M ∈ N, one has Eω[η0(x)] ≤ M for all x ∈ V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Then, for all T > 0, there exists a Markov process (ηt)t∈[0,T] with initial value η0 and paths in the Skohorod space D([0, T], NV ) that satisfies the following: for functions fG : NV → R of the form fG(η) = � x∈V G(x)η(x) with G compactly supported on Rn, the generator L of (ηt)t∈[0,T] is given by LfG(η) = � x,y∈V η(x)r(x, y) � G(y) − G(x) � + � x∈V η(x) � b − d � G(x) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' (2) Our second result establishes the hydrodynamic limit of the process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Let M(Rn) be the Polish space of non-negative Radon measures on Rn endowed with the vague topology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For π ∈ M(Rn) and a continuous function G ∈ C(Rn) we write ⟨π, G⟩ = � Rn G(y) π(dy).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' We consider a scaling parameter N ∈ N and associate to each element η ∈ NV the empirical measure πN = πN(η) = N −n � x∈V η(x)δx/N ∈ M(Rn), where δy represents a Dirac mass at y ∈ Rn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Conversely, we can recover η from πN via η(·) = η(πN)(·) = N nπN(·/N), so that for any fixed N we may use πN and η indifferently.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In this work, we are interested in the regime where the motion is faster than births and deaths (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' of infection and recovery rates for epidemics).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Thus, for a given N, we introduce now the process ηN with sped–up motion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For G compactly supported, its generator is given by (recall that fG(η) = � x∈V G(x)η(x)) LNfG(η) = � x∈V η(x)LNG(x/N) + � x∈V η(x) � b − d � G(x/N) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' (3) Here LNG(x/N) = � y∈V N 2r(x, y) � G(y/N) − G(x/N) � (4) is the generator of the random walk on V/N := {x/N : x ∈ V (ω)} with transition rates N 2r(·, ·).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The associated measure-valued process is defined as πN t := 1 N n � x∈V ηN t (x)δx/N .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' (5) 4 V.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' BANSAYE AND M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' SALVI Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1 guarantees that, for all T > 0 and fixed N ∈ N, (πN t )t∈[0,T] is a well-defined Markov process with values in D([0, T], M(Rn)), the space of measure-valued càdlàg processes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For the scaling limit, we need to consider initial conditions such that the tails of η0 are dominated by a product of (translated) Poisson distributions indexed by V , as precisely defined here below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' This allows in particular to invoke the existence and characterization stated in Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For example, one can take configurations with a number of particles that is a constant on each site or that is distributed as i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Poisson random variables, or a sum of the two.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' We also need that the initial conditions converge as N goes to infinity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For a given realization of the graph ω ∈ Ω, we make thus the following assumption.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Domination & Convergence Assumption.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The sequence of random configurations (ηN 0 )N∈N satisfies the following: (i) There exists M ∈ N0 and ρ > 0 such that for any N ∈ N, x ∈ V and for any A ⊂ V and (nx)x∈A ∈ N|A|, P ω� ∀x ∈ A, ηN 0 (x) ≥ M + nx � ≤ � x∈A � ∞ � j=nx ρje−ρ j!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' (6) (ii) There exists a bounded Borel function ρ0 : Rn → [0, ∞) such that, for any C∞ function with compact support G ∈ C∞ c (Rn), lim N→∞ N −n � x∈V ηN 0 (x)G(x/N) = � Rn G(x)ρ0(x) dx (7) in P ω–probability.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Consider now B(Ω), the family of bounded Borel functions on Ω, and let σ2 ≥ 0 be char- acterized by the variational formula σ2 := 1 2 inf ψ∈B(Ω) E0 � � y∈V r(0, y) � y1 + ψ(θyω) − ψ(ω) �2� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' (8) Here y1 denotes the first coordinate of y ∈ Rn and θyω is the environment translated by y (see Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1 for the precise meaning of this).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The expectation E0 is taken with respect to the Palm measure relative to the underlying Poisson point process, which can be obtained by just adding to the configuration a point at the origin (see [Daley and Vere-Jones(2008)] for a complete account of Palm measures).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Calling In the n-dimensional identity matrix, we point out that 2σ2In is the diffusion matrix of the Brownian motion obtained by rescaling diffusively the random walk on the Poisson point process with transition rates r(x, y), see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' [Faggionato(2022a)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For P-a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' ω ∈ Ω the following holds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Let (ηN 0 )N∈N be a sequence of random variables on NV which satisfies the Domination & Convergence Assumption for some bounded Borel function ρ0 : Rn → [0, ∞).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Then the sequence of processes {(πN t )t∈[0,T]}N∈N with initial value πN 0 = πN(ηN 0 ) converges in law in D([0, T], M(Rn)) to the deterministic trajectory (ρ(t, u) du)t∈[0,T], where ρ(·, ·) : [0, T] × Rn → R is the unique weak solution of the problem � ∂tρ = σ∆ρ + (b − d)ρ ρ(0, ·) = ρ0 .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' (9) BRANCHING PROCESS AND HOMOGENEIZATION FOR EPIDEMICS ON SPATIAL RANDOM GRAPHS 5 Since the sequence of processes converges in distribution to a deterministic process, we obtain immediately the following convergence in probability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Corollary 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Under the hypothesis of Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='2 we have that, for all t > 0, G ∈ Cc(Rn) and ε > 0, lim N→∞ P ω����N −n � x∈V G(x/N)ηt(x) − � Rn G(x)ρ(x, t) dx ��� ≥ ε � = 0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' State of the art, techniques and structure of the paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The rest of the paper is substantially divided into two parts corresponding to the proofs of the two main theorems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Section 2: Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1 establishes the well-posedness of the process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' As mentioned before, our setting does not seem to be treated in the previous literature, even if we set the rates of birth and death equal to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The case b = d = 0 corresponds to an instance of the so called zero-range process on G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The existence of the zero- range process on an arbitrary countable state space was proved in the classical work [Liggett(1973)] and then under weaker assumptions in [Andjel(1982)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' A first require- ment for those constructions is that, in some sense, the rate of jump of each particle must be uniformly bounded from above, a condition that fails in our setting due to the irregularity of V .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' A second problem is that in [Liggett(1973)] and [Andjel(1982)] one must impose a restriction on the initial configuration of particles η0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Namely, one accepts only η0 satisfying � x∈V η0(x)α(x) < ∞ for some function α such that � y∈V p(x, y)α(y) ≤ Mα(x), where M > 0 is a given constant and p(x, y) indicates the probability to go from x to y when the particle jumps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In our case, again because of the irregularity of V , this condition would not allow us to consider, for example, initial conditions with a constant number of particles on each site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Neither more re- cent approaches to prove existence for general particle systems on random graphs, like [Ganguly and Ramanan(2022)], cover our model, because of the unboundedness of the jump rates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' We adopt a different approach which borrows from [Andjel(1982)] the idea of ghost particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1 we enlarge our space and consider a richer measure-valued process where, roughly put, particles are labelled and leave a “ghost” behind them every time they jump to a new site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' To show existence of the original process, we pass through the well-posedness of the stochastic differential equation (12) associated to this richer measure-valued process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='2 we prove the existence when we restrict the dynamics to a finite subgraph of V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' We also pin down a key estimate of how many particles have visited a given compact set up to time T in mean, making use of the ghosts (Lemma 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='3 we extend the existence of the process when considering the whole infinite graph, but under the condition of having a finite number of particles at time 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' This is achieved by showing that the range covered by the particles stays finite almost surely, see Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Finally, in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='4, we include in our construction also the case of an infinite number of initial particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' Section 3: In this section we prepare some of the technical tools that are necessary for the proof of Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' The operator LN can be thought of as a discretization of the operator σ2∆.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' For a given G ∈ C∞ c (Rn), though, some difficulties arise if one tries to prove directly the convergence of LNG, due to the possible lack of regularity of this last object.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' To overcome the problem, one wants to substitute G by a regularized 6 V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' BANSAYE AND M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' SALVI version Gλ N for which LNGλ N directly yields the expected limit σ2∆G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' This proce- dure, introduced in [Jara and Landim(2008)] in the context of hydrodynamic limits and further developed in [Gonçalves and Jara(2008)], requires results from stochas- tic homogenization theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' First of all, in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='1, we prove that indeed we are allowed to use the homogenization machinery elaborated in [Faggionato(2022a)].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content=' In Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DNE4T4oBgHgl3EQfGAw7/content/2301.04890v1.pdf'} +page_content='2 we introduce Gλ N, prove some bounds in norm for this function and use the homogenization results to show its convergence to G in L1 and in L2, see Lemma 3.' 
Section 4: One of the main technical ingredients for proving the hydrodynamic limit of the sequence of processes {(π^N_t)_{t∈[0,T]}}_{N∈N} is the Kipnis-Varadhan estimate to control the supremum of the particle process integrated against a test function, see [Kipnis and Varadhan(1986)]. The estimate in its classic form, though, is valid only for reversible processes. In Lemma 4.1 we adapt the Kipnis-Varadhan estimate to our model without births and deaths, which is reversible but presents some issues due to the irregularity of V. In Lemma 4.3 we extend the estimate to the non-reversible setting. The idea is to look separately at each branch of the genealogical tree of the particles in the initial configuration. The process that looks at particles of a given branch can then be dominated by another (reversible) process, to which we can apply the original Kipnis-Varadhan type of estimate. This dominating process is obtained via a percolation procedure on the particles in the initial configuration.

Section 5: The strategy to prove Theorem 1.2 follows a classical tightness and identification procedure, which relies on the two previous sections. In Section 5.1 we consider the martingale problem and show that the process M^N appearing in (74) is an L² martingale via a truncation argument. We also prove that M^N tends to 0 in L² as N tends to infinity.
This enables us to conclude easily the proof of tightness by Aldous' criterion. Finally, in Lemma 5.6 we prove that a limiting value (π_t)_{t∈[0,T]} of the sequence {(π^N_t)_{t∈[0,T]}}_{N∈N} must have a density with respect to the Lebesgue measure and that it has to satisfy a suitable differential equation that admits a unique weak solution, cf. (71). For simplicity of exposition the proof until this point has been elaborated for the case without deaths, d = 0, and we conclude the section by extending the result to the general case d > 0, see Section 5.4.

Section 6: In the very last part of the paper we show that our two main theorems continue to hold if we consider a percolation procedure on the edges of the complete graph with nodes V. As special cases of interest for applications, we analyze the long-range percolation and scale-free percolation random graphs. We conclude in Section 6.2 with a discussion of open problems.

1.3. Notation. For a given realization of the graph G = G(ω), with ω ∈ Ω, we recall that P^ω is the probability measure under which we have built the process defined in Theorem 1.1 and E^ω is the relative expectation. We will make clear each time what initial distribution of particles the process is starting from, but sometimes we will further stress the initial condition with a subscript.
For example, if the initial distribution of particles on G(ω) is µ, then we can write P^ω_µ.

Remark 1.4. Through most of the proofs of the paper, we will talk directly about P^ω, without specifying each time that ω ∈ Ω is a realization of the underlying graph sampled according to the measure P. All the processes that appear will evolve under P^ω. All the claims about these processes are to be understood as holding for P–almost all ω, even when we do not mention it explicitly.

As mentioned before, M = M(R^n) stands for the Polish space of non–negative Radon measures on R^n endowed with the vague topology (namely, a sequence of measures ν_n converges to a measure ν in M if ⟨ν_n, f⟩ → ⟨ν, f⟩ for all f ∈ C_c(R^n)). Consequently, D([0, T], M(R^n)) indicates the space of measure-valued càdlàg processes.

2. Existence and characterization of the process

We will prove the existence of the process just for d = 0. Indeed, a positive rate of death of the particles cannot contribute to the explosion of the process in finite time (if anything, it can help prevent it). So, if the process is well-defined for d = 0, a completely analogous construction proves that it is also well-defined for any d > 0.

2.1. Measure valued process.
In order to prove Theorem 1.1 with d = 0 we will have to consider an auxiliary process that encodes more information than (η_t)_{t∈[0,T]} and that lives in the space of measure-valued processes. Let I := N × ⋃_{k≥0} {1, 2}^k. Under the measure P^ω, let (N^{x,y}_i)_{i∈I, x,y∈V} be a collection of Poisson point measures on R_+ with intensity r(x, y) dt and recall that r(x, x) = 0 for each x ∈ V. These Poisson point measures are chosen independent for each ordered couple (x, y) and they are also independent of the initial state η0. Also let (N^b_i)_{i∈I} be a collection of independent Poisson point measures on R_+ with intensity b dt, independent of the N^{x,y}_i's and of η0.

The interpretation is the following: I shall be thought of as the space of labels attached to each single particle. Particles that are present at time 0 will be just labelled with the natural numbers. If particle i is present at time t ≥ 0, we call X^i_t ∈ V its position. Suppose a particle with label i = n i_1 i_2 ... i_k, with n ∈ N, k ∈ N_0 and i_j ∈ {1, 2} for j = 1, ..., k, is at position X^i_{t−} at time t− and suppose that N^b_i(t) − N^b_i(t−) = 1. Then particle i disappears at time t and is replaced by two particles with labels n i_1 i_2 ... i_k 1 and n i_1 i_2 ... i_k 2 on the same site.
If instead particle i is at X^i_{t−} = x at time t− and N^{x,y}_i(t) − N^{x,y}_i(t−) = 1, then particle i disappears and generates particle i1 at site y, that is, X^{i1}_t = y, and it leaves behind a ghost particle on site x labelled with i. This way of labelling the particles is commonly known as the Ulam–Harris–Neveu notation. Let

    π̃_t = ∑_{i∈A_t} δ_{(i, X^i_t, a)} + ∑_{i∈G_t} δ_{(i, X^i_t, g)}

be the measure on I × V × {a, g} keeping track of the position and state of each particle. For any i ∈ I and u ∈ {a, g}, one has π̃_t({i} × V × {u}) ∈ {0, 1}. More precisely,

    A_t := {i ∈ I : π̃_t({i} × V × {a}) > 0}        (10)

is the set of particles that are present at time t and that can jump or give birth, also called alive particles, while

    G_t := {i ∈ I : π̃_t({i} × V × {g}) > 0}        (11)

is the set of ghost particles present at time t.

Our aim is to construct the process (π̃_t)_{t≥0} which satisfies P^ω–a.s., on every compact set,

    π̃_t = π̃_0 + ∫_0^t ∑_{i∈A_{s−}, y∈V} ( δ_{(i1, y, a)} + δ_{(i, X^i_{s−}, g)} − δ_{(i, X^i_{s−}, a)} ) N^{X^i_{s−}, y}_i(ds)
               + ∫_0^t ∑_{i∈A_{s−}} ( δ_{(i1, X^i_{s−}, a)} + δ_{(i2, X^i_{s−}, a)} − δ_{(i, X^i_{s−}, a)} ) N^b_i(ds)        (12)

where, for i ∈ A_t, X^i_t is the unique element x ∈ V such that π̃_t({i} × {x} × {a}) = 1. The initial configuration π̃_0 might be random under P^ω.
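To make the label bookkeeping behind (12) concrete, here is a minimal illustrative sketch (not from the paper: the dictionary encoding of π̃_t, the tuple representation of the labels and all names are ours) of how a single jump or birth event transforms the configuration.

# Illustrative sketch (assumption: not from the paper) of the Ulam-Harris-Neveu bookkeeping behind (12).
# A label is a pair (n, suffix): n indexes an initial particle and suffix is a tuple over {1, 2}.
# The configuration is a dict mapping label -> (site, status), with status "a" = alive, "g" = ghost.

def jump(state, label, new_site):
    """Particle `label` jumps: a ghost with the old label stays at the old site and an
    alive particle with label suffix + (1,) appears at `new_site`."""
    site, status = state.pop(label)
    assert status == "a"
    n, suffix = label
    state[label] = (site, "g")                  # ghost left behind at the old site
    state[(n, suffix + (1,))] = (new_site, "a")

def birth(state, label):
    """Particle `label` branches: two alive particles with suffixes +(1,) and +(2,)
    replace it on the same site."""
    site, status = state.pop(label)
    assert status == "a"
    n, suffix = label
    state[(n, suffix + (1,))] = (site, "a")
    state[(n, suffix + (2,))] = (site, "a")

# One initial particle, labelled (1, ()), on a site called "x0" (site names are placeholders).
state = {(1, ()): ("x0", "a")}
birth(state, (1, ()))               # creates labels (1, (1,)) and (1, (2,)) at "x0"
jump(state, (1, (1,)), "x1")        # ghost (1, (1,)) at "x0", alive (1, (1, 1)) at "x1"
A_t = {l for l, (_, s) in state.items() if s == "a"}   # alive labels, cf. (10)
G_t = {l for l, (_, s) in state.items() if s == "g"}   # ghost labels, cf. (11)

In this toy encoding, the keys with status "a" play the role of A_t and those with status "g" the role of G_t.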
For a Borel set A ⊆ R^n, we also let

    π_t(A) := π̃_t(I × (A ∩ V) × {a})        (13)

be the total number of alive particles in A at time t. We will see that (π_t)_{t∈[0,T]} corresponds to the measure-valued process (π^N_t)_{t∈[0,T]} introduced in (5) with N = 1.

We will show the existence of the process (η_t)_{t∈[0,T]} by constructing the richer process (π̃_t)_{t∈[0,T]} in three steps. First, in Section 2.2, we will show the existence of an analogous process restricted to a finite graph. We will then build on this to extend the existence of the process on the infinite graph, but only when the initial configuration has a finite number of particles, see Section 2.3. Finally, in Section 2.4 we will conclude with the existence of (π̃_t)_{t∈[0,T]} under the conditions of Theorem 1.1.

2.2. Existence of the process on a finite graph. In this section we deal with a version of the process (π̃_t)_{t∈[0,T]} for which the underlying spatial point process is restricted to a finite number of points. Fix a bounded set B ⊂ R^n. The process (π̃^B_t)_{t∈[0,T]} is defined as the strong solution of a stochastic differential equation whose jumps are represented by the Poisson point measures introduced in Section 2.1.
Analogously to (10) and (11), let

    A^B_t := {i ∈ I : π̃^B_t({i} × (V ∩ B) × {a}) > 0}   and   G^B_t := {i ∈ I : π̃^B_t({i} × (V ∩ B) × {g}) > 0}.

For i ∈ A^B_t, we write X^i_t for the location of particle i at time t, that is the unique point x ∈ V such that π̃^B_t({i, x, a}) = 1. Then (π̃^B_t)_{t∈[0,T]} is defined via

    π̃^B_t = π̃^B_0 + ∫_0^t ∑_{i∈A^B_{s−}, y∈V∩B} ( δ_{(i1, y, a)} + δ_{(i, X^i_{s−}, g)} − δ_{(i, X^i_{s−}, a)} ) N^{X^i_{s−}, y}_i(ds)
                   + ∫_0^t ∑_{i∈A^B_{s−}} ( δ_{(i1, X^i_{s−}, a)} + δ_{(i2, X^i_{s−}, a)} − δ_{(i, X^i_{s−}, a)} ) N^b_i(ds),        (14)

where the initial configuration π̃^B_0 is a point measure on I × (V ∩ B) × {a, g}, possibly random under P^ω. Notice that we do not impose any restriction on the initial configuration at this stage.

Let us justify that there exists a unique solution to this stochastic differential equation. Since we deal with a countable discrete state space, the existence of such a solution can be shown just by constructing a stochastic process with a classical inductive scheme, where the successive jumps are given by the Poisson point measures. This is a strong Markov process which is well defined until the potential accumulation point of the jumps (if explosion occurs) and it is the solution of (14) until the time of explosion by construction. We just need to prove that explosion does not occur almost surely. For that purpose, let us introduce the projection of the process on the last two coordinates

    Z^B_t(K, u) := π̃^B_t(I, V ∩ K, u),   K ⊆ R^n,  u ⊆ {a, g}.

Z^B_t(K, u) counts the alive or ghost particles in K at time t. Under P^ω, (Z^B_t)_{t∈[0,T]} is a multi-type branching process with a finite number of types (that is, (V ∩ B) × {a, g}) and bounded reproduction mean: at rate r(x, y) each particle of type (x, a) is replaced by two particles of types (x, g) and (y, a); at rate b each particle of type (x, a) creates a new particle of type (x, a); the particles of type (·, g) do not evolve.
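For illustration only (not part of the paper), the following Gillespie-type sketch simulates exactly this finite multi-type dynamics on a handful of points. The exponential kernel r(x, y) = e^{−∥x−y∥} is our own assumption, chosen to be consistent with the expression r(x) = ∑_y e^{−∥x−y∥} appearing in the proof of Lemma 2.3 below; all function and variable names are ours.

import numpy as np

rng = np.random.default_rng(0)

def simulate_counts(points, b=1.0, T=1.0, init_alive=1):
    """Gillespie simulation of the type counts Z^B_t on a finite point set:
    an alive particle at x becomes, at rate r(x, y) = exp(-|x - y|), a ghost at x
    plus an alive particle at y; at rate b it splits into two alive particles at x;
    ghosts never evolve."""
    pts = np.asarray(points, dtype=float)
    n = len(pts)
    dist = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
    rates = np.exp(-dist)
    np.fill_diagonal(rates, 0.0)                 # r(x, x) = 0
    alive = np.zeros(n, dtype=int)
    ghost = np.zeros(n, dtype=int)
    alive[:init_alive] = 1                       # arbitrary initial condition
    t = 0.0
    while True:
        jump_rate = alive * rates.sum(axis=1)    # total jump rate carried by each site
        birth_rate = b * alive
        total = jump_rate.sum() + birth_rate.sum()
        if total == 0.0:
            break
        t += rng.exponential(1.0 / total)
        if t > T:
            break
        if rng.random() < jump_rate.sum() / total:           # a jump event
            x = rng.choice(n, p=jump_rate / jump_rate.sum())
            y = rng.choice(n, p=rates[x] / rates[x].sum())
            alive[x] -= 1
            ghost[x] += 1
            alive[y] += 1
        else:                                                 # a birth event
            x = rng.choice(n, p=birth_rate / birth_rate.sum())
            alive[x] += 1
    return alive, ghost

# Toy usage: a handful of random points standing in for V ∩ B.
alive, ghost = simulate_counts(rng.uniform(0.0, 3.0, size=(6, 2)), b=1.0, T=2.0)
print("alive per site:", alive, "ghosts per site:", ghost)

Running such a simulation for larger T exhibits exponential growth of the alive population at rate b, in line with the first-moment bound (15) below.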
Using classical first moment estimates for the branching process (Z^B_t)_{t∈[0,T]} we obtain non-explosivity of the process, so (π̃^B_t)_{t∈[0,T]} is well defined for any positive time T > 0, see e.g. [Bansaye and Méléard(2015)]. Actually, we will need more quantitative estimates on the first moment of Z^B_t for the limiting procedure in the next section, in particular the dependence on the transition rates. These estimates are given in the next lemma, using the harmonic function of the branching process, which is here constant in space.

Lemma 2.1. Consider two compact sets K, B ⊂ R^n with K ⊆ B. Take a (possibly random) initial configuration Z^B_0 such that Z^B_0(B, {g}) = 0 and, for some M > 0, E^ω[Z^B_0(x, {a})] ≤ M for all x ∈ B ∩ V. Then it holds, for all T > 0,

    E^ω[Z^B_T(K, {a, g})] ≤ C_K M e^{bT},        (15)

where C_K = ∑_{x∈K∩V} (b^{−1} r(x) + 1). In particular C_K does not depend on B.

Proof. We define the matrix M^B_t with entries indexed by the types

    M^B_t((x, u), (y, v)) = E^ω_{δ_{(x,{u})}}[Z^B_t(y, {v})],   x, y ∈ B ∩ V,  u, v ∈ {a, g},

so that M^B_t((x, u), (y, v)) indicates the mean number of particles of type (y, v) present at time t if we started with a unique particle of type (x, u) at time 0.
This matrix is the first moment semigroup associated to a branching process and thus coincides with exp(tA), where A = A(B) is a finite matrix given by

    A = ( A_1  A_2 )
        (  0    0  )

with the following blocks: A_1 is the submatrix accounting for the evolution of a–particles, that is, for x ≠ y ∈ B ∩ V, we have A_1(x, x) = b − r_B(x) with r_B(x) := ∑_{z≠x, z∈B∩V} r(x, z), and A_1(x, y) = r(x, y). A_2 accounts for the generation of g–particles from a–particles, that is, A_2(x, y) = r(x, y). The two lower blocks have all their entries equal to 0, since g–particles neither move nor generate other g–particles. To see that, one may use the Kolmogorov forward equation or apply the differential equation (14) to (I, y, v) with π̃^B_0 = δ_{(1, x, {u})} and take expectations on both sides to get

    M^B_t((x, u), (y, v)) = δ_{(x,u)=(y,v)} + ∫_0^t (M_s A)((x, u), (y, v)) ds.

We now compute exp(At):

    A^k = ( A_1^k   A_1^{k−1} A_2 )        ⟹        e^{At} = ( e^{A_1 t}   ∑_{k≥1} A_1^{k−1} A_2 t^k / k! )
          (   0            0     )                           (     0                   Id               )

where Id is the identity matrix. Call z_0 = (M, M, ..., M ; 0, 0, ..., 0) the configuration having M alive particles on each site of B ∩ V and no ghost particles.
Then, for any initial configuration Z^B_0 with at most M alive particles per site of B ∩ V on average and no ghosts, as in the hypothesis of the lemma, it holds

    E^ω[Z^B_T(K, {a, g})] = E^ω[(Z^B_0 e^{AT})(K, {a, g})] = (E^ω[Z^B_0] e^{AT})(K, {a, g}) ≤ (z_0 e^{AT})(K, {a, g}).

Since 1̄ = (1, 1, ..., 1) is an eigenvector for the matrix A_1 with eigenvalue b (that is, 1̄ A_1 = (b, b, ..., b)) and since

    1̄ ∑_{k≥1} A_1^{k−1} A_2 t^k / k! = ∑_{k≥1} b^{k−1} (1, ..., 1) A_2 t^k / k! = ∑_{k≥1} (b^{k−1}/k!) t^k (r_B(x))_{x∈B} = b^{−1}(e^{bt} − 1)(r_B(x))_{x∈B},

it holds

    z_0 e^{At} = M ( e^{bt}, ..., e^{bt} ; b^{−1}(e^{bt} − 1) r(·), ..., b^{−1}(e^{bt} − 1) r(·) )

and we can conclude that

    E^ω[Z^B_T(K, {a, g})] ≤ M e^{bT} #{K ∩ V} + M b^{−1} e^{bT} ∑_{x∈K∩V} r(x),

which implies (15). □
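As a numerical sanity check of the last displays (our own sketch, not part of the paper), one can build A for a few points with the symmetric kernel r(x, y) = e^{−∥x−y∥} — again our choice of kernel — and compare z_0 e^{tA} with the closed form above.

import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(1)
pts = rng.uniform(0.0, 2.0, size=(5, 2))        # a few points standing in for B ∩ V
n, b, t, M = len(pts), 1.3, 0.7, 2.0

r = np.exp(-np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1))
np.fill_diagonal(r, 0.0)                        # r(x, x) = 0; the kernel is symmetric
rB = r.sum(axis=1)                              # rB(x) = sum_{z != x} r(x, z)

A1 = r.copy()                                   # A1(x, y) = r(x, y) for x != y
np.fill_diagonal(A1, b - rB)                    # A1(x, x) = b - rB(x)
A2 = r.copy()                                   # A2(x, y) = r(x, y)
A = np.block([[A1, A2], [np.zeros((n, n)), np.zeros((n, n))]])

z0 = np.concatenate([M * np.ones(n), np.zeros(n)])     # M alive per site, no ghosts
lhs = z0 @ expm(t * A)
rhs = np.concatenate([M * np.exp(b * t) * np.ones(n),
                      M * (np.exp(b * t) - 1.0) / b * rB])
print(np.allclose(lhs, rhs))                    # expected: True

Note that this check uses 1̄ A_1 = b 1̄, which holds here because the kernel we chose is symmetric.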
2.3. Existence of the process on the infinite graph with a finite number of initial particles. In this section we want to show that the process (π̃_t)_{t≥0} described in (12) is well defined when we start with a configuration with a finite number of particles. We will show that (π̃_t)_{t≥0} can in fact be obtained as the limit of the process (π̃^{B_N}_t)_{t≥0} introduced in Section 2.2, where B_N is the n-dimensional box [−N, N]^n.

Consider the process (π̃^{B_N}_t)_{t∈[0,T]} introduced in Section 2.2 up to time T > 0. We want to show now that this process "stabilizes" as N tends to infinity. That is, suppose we use the same source of randomness (i.e. the same realization of the Poisson processes N^{x,y}_i, N^b_i) to construct the process (π̃^{B_N}_t)_{t∈[0,T]} for all the different N's. Then, with P^ω–probability 1, there exists N_0 ∈ N such that, for all N ≥ N_0,

    (π̃^{B_N}_t)_{t∈[0,T]} ≡ (π̃^{B_{N_0}}_t)_{t∈[0,T]}.        (16)

To this end, we first of all prove that the progeny of a finite number of particles remains in a finite region up to time T > 0 with probability 1 as N → ∞. For an initial configuration of alive and ghost particles z_0, define the maximal displacement at time T as

    R_N(z_0, T) := sup_{t∈[0,T]} sup_{i∈A_t} ∥X^{N,i}_t∥,

where X^{N,i}_t is the position of particle i at time t in the process (π̃^{B_N}_t)_{t∈[0,T]}.
Proposition 2.2. Consider an initial configuration z_0 with a finite number of alive particles. Then, P^ω–almost surely, there exists Q > 0 such that R_N(z_0, T) ≤ Q for all N ∈ N.

Proof. We first consider z_0 to be constituted of a unique alive particle, labelled 1. Without loss of generality we can imagine particle 1 to start at the origin. Abbreviate R = R_N(z_0, T) and consider (π̃^{B_N}_t)_{t∈[0,T]} for any N. For M > 0, we can bound

    P^ω(R > M) = ∑_{ℓ=0}^∞ P^ω(R > M | E_{2ℓ+1}) P^ω(E_{2ℓ+1})        (17)

with

    E_ℓ = E_ℓ(T) := {particle 1 had ℓ − 1 descendants up to time T}.

By a descendant of particle 1 we mean a particle with label starting by 1 and that was generated via a birth event (so we do not count the particles whose label starts by 1 that were generated with a change of label due to a jump event). We are considering only odd integers 2ℓ + 1 since each time a particle disappears it generates two new particles. For ℓ ∈ N, the quantity P^ω(E_ℓ) is clearly dominated by P^ω(E^+_ℓ), with

    E^+_ℓ := {particle 1 had at least ℓ − 1 descendants up to time T}.

The number Z^{B_N}_t(R^n, {a}) of alive particles at time t follows an N-valued Markov process starting at 1 that goes from k to k + 1 with rate kb. Let (e_k)_{k∈N} be independent exponential random variables under P^ω with E^ω[e_k] = (bk)^{−1} and let S_ℓ := ∑_{k=1}^ℓ e_k.
We bound, for all θ > 0,

    P^ω(S_ℓ ≤ T) ≤ e^{θT} E^ω[e^{−θ S_ℓ}] = e^{θT} ∏_{k=1}^ℓ ( 1 − θ/(kb + θ) ) ≤ e^{θT − ∑_{k=1}^ℓ θ/(kb+θ)},        (18)

where for the first inequality we have exploited the exponential Markov inequality, while for the second passage we have used the independence of the e_k's and the formula for the moment generating function of the exponential distribution. Notice that if the total number of descendants of particle 1 at time T is 2ℓ, then Z^{B_N}_T(R^n, {a}) = ℓ + 1. A (non-optimized) choice of θ = 4b in (18) therefore yields

    P^ω(E^+_{2ℓ+1}) = P^ω(Z^{B_N}_T(R^n, {a}) ≥ ℓ + 1) = P^ω(S_{ℓ+1} ≤ T) ≤ e^{4bT − ∑_{k=1}^{ℓ+1} 4/(k+4)} ≤ C ℓ^{−4}        (19)

for ℓ sufficiently large and some universal constant C > 0.
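The last inequality in (19) is not spelled out; one way to see it (our computation, with the value of C allowed to change) is by an integral comparison:

    ∑_{k=1}^{ℓ+1} 4/(k+4) = 4 ∑_{j=5}^{ℓ+5} 1/j ≥ 4 ∫_5^{ℓ+6} dx/x = 4 log((ℓ+6)/5),

so that e^{4bT − ∑_{k=1}^{ℓ+1} 4/(k+4)} ≤ e^{4bT} (5/(ℓ+6))^4 ≤ C ℓ^{−4}, with C depending only on b and T.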
We move to the analysis of the term P^ω(R > M | E_ℓ) in (17). Abandoning for a moment the Ulam–Harris–Neveu notation, let us look at the descendants of particle 1 and just label them 2, 3, 4, ... in chronological order of birth (particles 2j and 2j + 1 are born in the same instant, for all j). Let x_k ∈ V and t_k ∈ [0, T] be the site and the time where the k-th particle was born and let (X^k_t) be its trajectory while alive. Let R_k be the maximal displacement of particle k, that is, R_k := sup_{t∈[t_k,T]} ∥X^k_t − x_k∥. We observe that

    P^ω(R > M | E_ℓ) = P^ω( ∃ k ∈ {1, ..., ℓ} : R_k > M/ℓ and R_j < M/ℓ for all j < k | E_ℓ ) ≤ ∑_{k=1}^ℓ P^ω( R_k > M/ℓ | A_{k,ℓ} ),        (20)

with A_{k,ℓ} := {R_j < M/ℓ for all j < k} ∩ E_ℓ. We have therefore to study the probability that (X^k_t) left the ball B_{M/ℓ}(x_k) before time T knowing that the first k − 1 particles had a displacement smaller than M/ℓ. We point out that, under A_{k,ℓ}, the ball B_{M/ℓ}(x_k) is completely contained in B_M: it follows that all x ∈ B_{M/ℓ}(x_k) have r(x) < C log M by Lemma 2.3 (see below) and that B_{M/ℓ}(x_k) contains at most C M^n log M / ℓ^n points of V (this follows from item (ii) in the proof of Lemma 2.3). Let

    τ_k := inf{ t ∈ [t_k, T] : X^k_t ∉ B_{M/ℓ}(x_k) }.

We decompose the event {τ_k ≤ T} = {τ'_k < τ_k ≤ T} ∪ {τ_k ≤ T, τ'_k ≥ τ_k} with

    τ'_k := inf{ t ∈ [t_k, T] : ∥X^k_t − X^k_{t−}∥ ≥ √(M/ℓ) }

the first time that particle k makes a jump longer than √(M/ℓ). Under A_{k,ℓ}, the event that (X^k_t) makes a jump of length larger than √(M/ℓ) inside B_{M/ℓ}(x_k) has rate smaller than e^{−√(M/ℓ)} times the number of points in B_{M/ℓ}(x_k). It follows that

    P^ω(τ'_k < τ_k ≤ T | A_{k,ℓ}) ≤ P^ω(ξ ≤ T) ≤ c T e^{−√(M/ℓ)} M^n log M / ℓ^n        (21)

for some universal constant c > 0, where ξ is an exponential random variable with parameter C e^{−√(M/ℓ)} M^n log M / ℓ^n. On the other hand, the event {τ_k ≤ T, τ'_k ≥ τ_k} implies that X^k has performed more than √(M/ℓ) jumps before time T. Remember that under A_{k,ℓ} each jump has rate smaller than C log M by Lemma 2.3. Hence, if Y is a Poisson random variable of parameter C T log M,

    P^ω(τ_k ≤ T, τ'_k ≥ τ_k | A_{k,ℓ}) ≤ P^ω(Y > √(M/ℓ)) ≤ e^{−CT log M} ( e C T log M / √(M/ℓ) )^{√(M/ℓ)}        (22)

where the last bound holds for M/ℓ sufficiently large, for example when ℓ ≤ √M, see e.g. [Vershynin(2018), Exercise 2.3.3].
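For completeness, the Poisson tail estimate invoked here (standard, and not restated in the excerpt) reads: if Y is Poisson with parameter λ and t > λ, then P(Y ≥ t) ≤ e^{−λ} (eλ/t)^t. With λ = C T log M and t = √(M/ℓ) this gives exactly the right-hand side of (22).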
Continuing from (20), bounds (21) and (22) together yield, for M sufficiently large and ℓ ≤ √M,

    P^ω(R > M | E_ℓ) ≤ ∑_{k=1}^ℓ P^ω(τ_k ≤ T | A_{k,ℓ}) ≤ c_1 ℓ e^{−c_2 √(M/ℓ)}.        (23)

Going back to (17) and using (19) and (23), we finally have

    P^ω(R > M) ≤ ∑_{ℓ=0}^{√M} P^ω(R > M | E_{2ℓ+1}) + ∑_{ℓ=√M}^∞ P^ω(E^+_{2ℓ+1}) ≤ c_1 M e^{−c_2 M^{1/4}} + C M^{−3/2}.

This quantity is summable in M, which implies the claim by the Borel-Cantelli lemma for a single initial particle. The argument can be easily generalized to any finite number of initial particles. □

Lemma 2.3. There exists C > 0 such that, for P-a.a. ω, the following holds: there exists N̄ = N̄(ω) such that for all N ≥ N̄ one has

    max_{x∈B_N∩V} r(x) < C log N.

Proof. We will use the following two elementary facts about Poisson point processes. Recall that B_N = [−N, N]^n. Let (B_i)_{i=1,...,(2N)^n} be a collection of disjoint (up to their boundary) volume-1 cubes covering B_N and let C_j := {x ∈ B_{j+1} \ B_j}, for j ∈ N, be the j-th square-crown around the origin. Then there exist constants c_1, c_2 > 0, only depending on the dimension n, such that, for P-a.a. ω,
(i) there exists N_1 = N_1(ω) such that for all N ≥ N_1

    #{x ∈ C_N ∩ V} ≤ c_1 N^{n−1};

(ii) there exists N̄_2 = N̄_2(ω) such that for all N ≥ N̄_2 and for all i = 1, ..., (2N)^n

    #{x ∈ B_i ∩ V} ≤ c_2 log N.

Both facts can be checked by using classic concentration inequalities for Poisson random variables around their mean and then the Borel-Cantelli lemma. Take N ≥ max{N_1, N̄_2} and write, for x ∈ B_N,

    r(x) = ∑_{y∈B_{2N}∩V} e^{−∥x−y∥} + ∑_{y∈B^c_{2N}∩V} e^{−∥x−y∥}.        (24)

For the first sum we divide B_{2N} into B_i's as for item (ii) above, with i = 1, ..., (2N)^n. Notice that, for all k ∈ N, there are less than c_3 k^{n−1} such boxes at distance k from x, for some c_3 > 0 that only depends on the dimension n. Furthermore, in each of these boxes there are at most c_2 log(2N) vertices by (ii). Hence it holds

    ∑_{y∈B_{2N}∩V} e^{−∥x−y∥} ≤ ∑_{k=0}^{2N} c_3 e^{−k} k^{n−1} · c_2 log(2N) ≤ c_5 log N.        (25)

For the second sum in (24) we use item (i) and bound

    ∑_{y∈B^c_{2N}∩V} e^{−∥x−y∥} = ∑_{k=N}^∞ ∑_{y∈C_k∩V} e^{−∥x−y∥} ≤ ∑_{k=N+1}^∞ c_1 k^{n−1} e^{−(k−N)} ≤ c_6        (26)

for some c_6 > 0. Putting (25) and (26) into (24) gives the result. □
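As a quick numerical illustration of Lemma 2.3 (ours, not the paper's: the unit intensity, the box sizes and the comparison constant are arbitrary choices, and boundary effects are ignored since the sum defining r(x) is truncated to the sampled box), one can sample V inside B_N and compare max_{x∈B_N∩V} r(x) with log N.

import numpy as np

rng = np.random.default_rng(2)

def max_rate(N, dim=2, intensity=1.0):
    """Sample a Poisson point process V on B_N = [-N, N]^dim and return
    max over x in V of r(x) = sum over y != x in V of exp(-|x - y|)."""
    volume = (2 * N) ** dim
    k = rng.poisson(intensity * volume)
    V = rng.uniform(-N, N, size=(k, dim))
    d = np.linalg.norm(V[:, None, :] - V[None, :, :], axis=-1)
    np.fill_diagonal(d, np.inf)                  # drop the y = x term
    return np.exp(-d).sum(axis=1).max()

for N in (5, 10, 20):
    print(N, round(max_rate(N), 2), "  vs  3*log(N) =", round(3 * np.log(N), 2))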
Corollary 2.4 (Corollary of Proposition 2.2). Consider a compact set Q ⊂ R^n. Let Z_0 ∈ N^V × N^V be an initial configuration such that Z_0(x, {a, g}) = 0 for all x ∉ Q ∩ V and E^ω[Z_0(x, a)] ≤ M for all x ∈ Q ∩ V. Then, P^ω–a.s., for every set of labels I ⊆ I, K ⊂ R^n compact, U ⊆ {a, g} and t > 0, the following limit exists:

    π̃_t(I, K, U) := lim_{N→∞} π̃^{B_N}_t(I, K, U).

Furthermore, the following holds:

(i) The measure (π̃_t)_{t≥0} verifies equation (12), where the two sides are finite measures and coincide on R^n.

(ii) Defining, for all compact sets K ⊂ R^n, all u ⊆ {a, g} and all t ≥ 0,

    Z_t(K, u) := π̃_t(I, K, u),

one has, for all T > 0,

    E^ω[Z_T(K, {a, g})] ≤ C_K M e^{bT}        (27)

where C_K = ∑_{x∈K∩V} (b^{−1} r(x) + 1).

Proof. The existence of π̃_t follows immediately from Proposition 2.2, since it implies that (16) holds P^ω–a.s. for all T > 0. In particular, (16) and the fact that π̃^{B_N}_T(I, R^n, {a, g}) < ∞ almost surely (which follows by (15)) imply that π̃_T(I, R^n, {a, g}) < ∞ almost surely. We let A_s := lim_{N→∞} A^{B_N}_s for almost every realization of the process.
For item (i), we first notice that (\hat π^{B_N}_t) satisfies (14) with B = B_N. P^ω-a.s., for all bounded test functions f with support on some set C ⊂ R^n and for all t ∈ [0, T] we have

\int_0^t \sum_{i ∈ A_{s-}, y ∈ V} | f(i1, y, a) + f(i, X^i_{s-}, g) - f(i, X^i_{s-}, a) | N^{X^i_{s-}, y}_i(ds)
 ≤ 3 ∥f∥_∞ ( \int_0^T \sum_{i ∈ A_{s-}, X^i_{s-} ∈ C, y ∈ V} N^{X^i_{s-}, y}_i(ds) + \int_0^T \sum_{i ∈ A_{s-}, X^i_{s-} ∉ C, y ∈ C} N^{X^i_{s-}, y}_i(ds) )
 ≤ 3 ∥f∥_∞ \hat π_T(I, C, {a, g}) < ∞   (28)

since \hat π_T(I, R^n, {a, g}) < ∞. Similarly, the number of births in C is a.s. controlled by \hat π_T(I, C, {a, g}):

\int_0^t \sum_{i ∈ A_{s-}} | f(i1, X^i_{s-}, a) + f(i2, X^i_{s-}, a) - f(i, X^i_{s-}, a) | N^b_i(ds) ≤ 3 ∥f∥_∞ \hat π_T(I, C, {a, g}).   (29)

Indeed, for each newborn in C there is either an active particle in C or, at least, a ghost. To sum up, the integrals appearing on the right-hand side of (12) are almost surely well defined on R^n and finite, and (i) follows from (14) by letting N go to infinity. We turn our attention to item (ii). By Fatou's lemma and Proposition 2.1,

E^ω[Z_T(K, {a, g})] ≤ \liminf_{N→∞} E^ω[Z^{B_N}_T(K, {a, g})] ≤ C_K M e^{bT}.

This ends the proof. □
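To put the growth factor e^{bT} in (27) into perspective, here is a small Monte Carlo aside (added for illustration, not part of the paper, with made-up parameter values): for a single particle that branches at rate b and never dies or moves, the expected population at time T is exactly e^{bT}, which is the elementary input behind bounds of the type (27).

import numpy as np

rng = np.random.default_rng(1)
b, T, runs = 0.7, 2.0, 20000          # branching rate, horizon and number of runs (illustrative values)
totals = []
for _ in range(runs):
    t, count = 0.0, 1                  # one initial particle, pure branching (d = 0), no motion
    while True:
        t += rng.exponential(1.0 / (b * count))   # next branching time of the whole population
        if t > T:
            break
        count += 1
    totals.append(count)
print(np.mean(totals), np.exp(b * T))  # the empirical mean should be close to e^{bT}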
2.4. Existence of the process on the infinite graph with infinitely many initial particles. In the previous section we have shown that the process (\hat π_t)_{t∈[0,T]} is well defined as soon as the initial condition involves only a finite number of particles. We now want to show the existence of (\hat π_t)_{t∈[0,T]} also for initial configurations where the average number of particles on each site is bounded. Consider an initial configuration of particles Z_0 ∈ N^V × N^V of alive and ghost particles such that E^ω[Z_0(x, a)] ≤ M. For N ∈ N the truncated configuration Z_{0,N} is obtained by considering only the particles of Z_0 that are inside the ball B_N: Z_{0,N}(x, ·) = Z_0(x, ·) 1_{x ∈ B_N}. A central observation is that we have monotonicity in N of the process: take N_1 < N_2 and couple the processes started in Z_{0,N_1} and Z_{0,N_2}, calling them (\hat π_{t,N_j})_{t≥0} for j = 1, 2. Then we have, P^ω-a.s.,

\hat π_{t,N_1}(i, x, u) ≤ \hat π_{t,N_2}(i, x, u)   for all i ∈ I, x ∈ V, u ∈ {a, g}.   (30)

As a consequence, we have the following proposition.

Proposition 2.5. Consider a configuration of particles Z_0 ∈ N^V × N^V such that E^ω[Z_0(x, a)] ≤ M for all x ∈ V. Let (\hat π_{t,N})_{t≥0} be the process on the infinite graph started in Z_{0,N}. Then, P^ω-a.s., for every I ∈ I, K ⊂ R^n compact, U ⊆ {a, g} and t ≥ 0, the following limit exists and is finite:

\hat π_t(I, K, U) := \lim_{N→∞} \hat π_{t,N}(I, K, U).
Furthermore, one has, for all T > 0,

E^ω[\hat π_T(I, K, {a, g})] ≤ C_K M e^{bT}   (31)

where C_K = \sum_{x ∈ K}(b^{-1} r(x) + 1).

Notice that we have called the limiting process again (\hat π_t)_{t≥0}, since we have extended the definition appearing in Corollary 2.4 to a larger set of initial conditions.

Proof. The existence of the limit follows by the monotonicity in (30). Fix any T > 0. We now want to show that, P^ω-a.s., \hat π_t(I, K, {a, g}) does not explode for any compact K ⊂ R^n and 0 ≤ t ≤ T. Keeping (30) in mind, we can use monotone convergence in N to see that

E^ω[ \sup_{t∈[0,T]} \hat π_t(I, K, {a, g}) ] = E^ω[\hat π_T(I, K, {a, g})] = \lim_{N→∞} E^ω[\hat π_{T,N}(I, K, {a, g})] ≤ C_K M e^{bT} < ∞,   (32)

where the last inequality is (27) and where for the first equality we have used the fact that \hat π_t(I, K, {a, g}) is also monotone in t, since each new event does not decrease the total number of particles in K. □

It follows that \sup_{t∈[0,T]} \hat π_t(I, K, {a, g}) is finite P^ω-almost surely. Notice in particular that this implies that \hat π_{t,N}(I, K, U) = \hat π_{t,M}(I, K, U) for all N, M large enough. If this were not the case, we would have an infinite sequence of initial particles, coming from arbitrarily far away, whose progeny would enter K before time T, thus making \hat π_T(I, K, {a, g}) explode.

Corollary 2.6. The following holds.
(i) For any t ≥ 0 and f : I × R^n × {a, g} → R measurable and compactly supported in the second coordinate:

E^ω[ \int_0^t \sum_{i ∈ A_{s-}, y ∈ V} | f(i1, y, a) + f(i, X^i_{s-}, g) - f(i, X^i_{s-}, a) | N^{X^i_{s-}, y}_i(ds) ] < ∞,
E^ω[ \int_0^t \sum_{i ∈ A_{s-}} | f(i1, X^i_{s-}, a) + f(i2, X^i_{s-}, a) - f(i, X^i_{s-}, a) | N^b_i(ds) ] < ∞.

(ii) For such functions f and t ≥ 0, the following identity holds a.s.:

⟨\hat π_t, f⟩ = ⟨\hat π_0, f⟩ + \int_0^t \sum_{i ∈ A_{s-}, y ∈ V} ( f(i1, y, a) + f(i, X^i_{s-}, g) - f(i, X^i_{s-}, a) ) N^{X^i_{s-}, y}_i(ds) + \int_0^t \sum_{i ∈ A_{s-}} ( f(i1, X^i_{s-}, a) + f(i2, X^i_{s-}, a) - f(i, X^i_{s-}, a) ) N^b_i(ds).

Proof. The first part is a consequence of (28) and (29) for bounded functions f with support on some compact set C ⊂ R^n, together with (32), which guarantees finiteness. We are left to show that (\hat π_t) is a solution of equation (12) on any compact set, where now the initial population can be unbounded. By choosing N_0 large, the terms involved in (12) for (\hat π_{t,N})_{t∈[0,T]} are all constant for N ≥ N_0, which ends the proof. □

Recall that π_t is the projection of \hat π_t on alive particles, i.e. for a Borel set B ⊂ R^n, π_t(B) := \hat π_t(I × (B ∩ V) × {a}). For every f : R^n → R with compact support, we get

⟨π_t, f⟩ = ⟨π_0, f⟩ + \int_0^t \sum_{i ∈ A_{s-}, y ∈ V} ( f(y) - f(X^i_{s-}) ) N^{X^i_{s-}, y}_i(ds) + \int_0^t \sum_{i ∈ A_{s-}} f(X^i_{s-}) N^b_i(ds).   (33)

We can now justify that the generator of this process is given by (2) and end the proof of Theorem 1.1.
More precisely, let us check that, for all G compactly supported on R^n,

M^G_t = ⟨π_t, G⟩ - ⟨π_0, G⟩ - \int_0^t L f_G(η_s) ds

is indeed a martingale, where we recall that η_t(x) = π_t({x}) and, for f_G(η) = \sum_{x ∈ V} G(x) η(x),

L f_G(η) = \sum_{x,y ∈ V} η(x) r(x, y) ( G(y) - G(x) ) + \sum_{x ∈ V} η(x) (b - d) G(x).   (34)

The fact that ⟨π_t, G⟩ is integrable is due to (31). The fact that E^ω[\int_0^t |L f_G|(η_s) ds] is finite is due to (28) and (29), which allow us to bound this term by E^ω[\hat π_t(I, C, {a, g})]. Besides,

M^G_t = ⟨\hat π_0, f⟩ + \int_0^t \sum_{i ∈ A_{s-}, y ∈ V} ( f(i1, y, a) + f(i, X^i_{s-}, g) - f(i, X^i_{s-}, a) ) \tilde N^{X^i_{s-}, y}_i(ds) + \int_0^t \sum_{i ∈ A_{s-}} ( f(i1, X^i_{s-}, a) + f(i2, X^i_{s-}, a) - f(i, X^i_{s-}, a) ) \tilde N^b_i(ds),

where \tilde N_i and \tilde N^b_i are the compensated Poisson point measures. Again, (28) and (29) provide the integrability condition for stochastic L^1 martingales with jumps, see for example [Ikeda and Watanabe(1989)]. Thus, M^G inherits the martingale property. This ensures that L provides the generator for functions of the form f_G.
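For readers who want to see these dynamics concretely, the following is a minimal Gillespie-type sketch (an added illustration with made-up parameters, not the construction used in the paper): particles sit on a finite sampled cloud, jump from x to y at rate r(x, y) = e^{-∥x-y∥}, branch at rate b and die at rate d, and at the end we evaluate ⟨π_T, G⟩ for one realization.

import numpy as np

rng = np.random.default_rng(2)
V = rng.uniform(-5, 5, size=(200, 2))                  # finite stand-in for the Poisson cloud V
R = np.exp(-np.linalg.norm(V[:, None, :] - V[None, :, :], axis=2))
np.fill_diagonal(R, 0.0)                               # convention r(x, x) = 0
jump_out = R.sum(axis=1)                               # r(x) = sum_y r(x, y)
b, d, T = 0.3, 0.1, 1.0                                # birth rate, death rate, time horizon
G = lambda x: np.exp(-np.sum(x ** 2))                  # smooth test function (stand-in for G in (34))

particles = list(rng.integers(0, len(V), size=50))     # initial particles, stored as site indices
t = 0.0
while particles:
    rates = np.array([jump_out[i] + b + d for i in particles])
    t += rng.exponential(1.0 / rates.sum())
    if t > T:
        break
    k = rng.choice(len(particles), p=rates / rates.sum())      # particle responsible for the next event
    i = particles[k]
    u = rng.uniform(0.0, rates[k])
    if u < jump_out[i]:                                        # jump to y chosen with probability r(x, y)/r(x)
        particles[k] = rng.choice(len(V), p=R[i] / jump_out[i])
    elif u < jump_out[i] + b:                                  # branching: a new particle at the same site
        particles.append(i)
    else:                                                      # death
        particles.pop(k)
print(len(particles), sum(G(V[i]) for i in particles))         # population and <pi_T, G> for this run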
3. Input from homogenization

In this section we set up the homogenization tools that are needed to prove the hydrodynamic limit in Theorem 1.2. Notice that the results we collect are mainly inherent to the environment ω ∈ Ω: the specific particle dynamics we are analyzing enters these results only through the generator of the simple random walk L_N.

3.1. Assumptions for homogenization on point processes. In [Faggionato(2022a)] Faggionato proves homogenization for a wide class of random walks on purely atomic measures on R^n under some regularity assumptions on the environment, called (A1), . . . , (A9). Our proof of Theorem 1.2 relies on these homogenization results. We first state these assumptions in a simplified way, adapted to our context, and then check that they are indeed satisfied by our model. Consider the Abelian group G = R^n acting on a probability space (Ω, P, F) in the following way (see (P1), . . . , (P4) in [Faggionato(2022a)]): for g ∈ G we consider a measurable map θ_g : Ω → Ω such that θ_0 is the identity; θ_g ∘ θ_{g'} = θ_{g+g'} for all g, g' ∈ G; the map (g, ω) → θ_g ω is measurable; P ∘ θ_g^{-1} = P for all g ∈ G. The group G acts also on the space R^n by space translations (τ_g)_{g∈G}, with τ_g x = x + g for all g ∈ G and x ∈ R^n. Suppose we are given a random purely atomic, locally finite, non-negative measure μ_ω ∈ M(R^n),

μ_ω = \sum_{x ∈ \hat ω} n_x(ω) δ_x,   n_x(ω) := μ_ω({x}),   \hat ω := {x ∈ R^n : n_x(ω) > 0}.
Let P_0 be the Palm measure associated to P and E_0 the associated expectation (see for example [Faggionato(2022a), equation (9)] for the precise definition, or [Daley and Vere-Jones(2008)] for a more complete account of Palm measures). Finally, let r : (ω, x, y) → r(ω, x, y) ∈ [0, ∞) be the jump rates, with r(ω, x, x) = 0 for all x ∈ R^n and ω ∈ Ω, and r(ω, x, y) = 0 whenever x or y is not in \hat ω. Then the nine assumptions are the following, with Ω_* some measurable, translation-invariant subset of Ω with P(Ω_*) = 1:

(A1) P is stationary and ergodic w.r.t. (θ_g)_{g∈G}. That is, P ∘ θ_g^{-1} = P for all g ∈ G and, for each A ⊆ Ω such that A = θ_g A for all g ∈ G, one has P(A) ∈ {0, 1};
(A2) 0 < E[μ_ω([0, 1)^n)] < ∞;
(A3) for all ω ∈ Ω_* and all g ≠ g' it holds θ_g ω ≠ θ_{g'} ω;
(A4) for all ω ∈ Ω_*, μ_ω is G-stationary: for all x, y ∈ R^n and all g ∈ G it holds μ_{θ_g ω} = τ_g μ_ω and r(θ_g ω, x, y) = r(ω, τ_g x, τ_g y);
(A5) for all ω ∈ Ω_* and all x, y ∈ \hat ω it holds n_x(ω) r(ω, x, y) = n_y(ω) r(ω, y, x);
(A6) for all ω ∈ Ω_* and all x, y ∈ \hat ω there exists a path x = x_0, x_1, . . . , x_{n-1}, x_n = y such that r(ω, x_i, x_{i+1}) > 0 for all i = 0, . . . , n - 1;
(A7) E_0[ \sum_{x ∈ \hat ω} r(ω, 0, x) |x|^k ] < ∞ for k = 0, 2;
(A8) L^2(P_0) is separable;
(A9) setting N_z(ω) := μ_ω(z + [0, 1)^n) for z ∈ Z^n, it holds E[N_0^2] < ∞ and, for some C ≥ 0, |Cov(N_z, N_{z'})| ≤ C |z - z'|^{-1}.

We now prove that these assumptions are satisfied by our model.

Lemma 3.1. The complete graph G = (V, E) on a Poisson point process of parameter γ > 0 in R^n with transition rates r(x, y) = r(y, x) = e^{-∥x-y∥} (with the convention r(x, x) = 0) satisfies assumptions (A1), . . . , (A9) of [Faggionato(2022a)].

Proof of Lemma 3.1. In our case μ_ω is the point measure associated to the Poisson point process, so that almost surely n_x(ω) = 1 for each point x ∈ V. We also notice that \hat ω coincides with V. (A1), (A2) and (A3) clearly hold. (A4) and (A5) also come from the stationarity of the Poisson point process and from our choice of the rates. (A6) is trivial since we are considering the complete graph. For (A7) and (A8), we mention that the Palm measure associated to the underlying Poisson point process can be obtained by simply adding an additional point to the configuration at the origin. (A7) is then easy to verify, while for (A8) see the comment at the end of Section 2.4 in [Faggionato(2022a)]. Finally, N_z is just the number of points in the box z + [0, 1)^n, so that E[N_0^2] = γ^2 + γ < ∞ and the covariance appearing in (A9) is simply equal to 0. □
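As a side check (added here for illustration, not part of the original proof), both (A7) and (A9) are easy to probe numerically for the Poisson cloud: by Slivnyak's theorem the Palm version is an independent Poisson process with an extra point at the origin, and counts in disjoint unit boxes are independent. The parameters below are illustrative.

import numpy as np

rng = np.random.default_rng(3)
gamma, n, L = 1.0, 2, 40.0
num = rng.poisson(gamma * (2 * L) ** n)
pts = rng.uniform(-L, L, size=(num, n))              # Palm picture: this cloud plus one extra point at 0
norms = np.linalg.norm(pts, axis=1)
for k in (0, 2):
    print("k =", k, " sum_x r(0, x) |x|^k =", np.sum(np.exp(-norms) * norms ** k))   # finite, as in (A7)
edges = np.arange(-L, L + 1)                          # unit boxes tiling [-L, L)^2
counts, _, _ = np.histogram2d(pts[:, 0], pts[:, 1], bins=[edges, edges])
print("E[N_0^2] approx", np.mean(counts ** 2), " vs gamma^2 + gamma =", gamma ** 2 + gamma)
base = counts[:-1, :] - counts.mean()
shifted = counts[1:, :] - counts.mean()
print("Cov(N_z, N_{z + e_1}) approx", np.mean(base * shifted))   # approximately 0, as in (A9)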
3.2. The Poisson equation. Fix ω ∈ Ω. Recall from (4) that L_N is the generator of the diffusively rescaled random walk on V/N := {x/N : x ∈ V(ω)} with transition rates N^2 r(·, ·). We think of L_N as acting on functions in L^2(μ_N), where μ_N = μ_N(ω) is the uniform measure on V/N, that is

μ_N := N^{-n} \sum_{x ∈ V} δ_{x/N}.

We write (·, ·)_{μ_N} and ∥·∥_{L^2(μ_N)} for, respectively, the scalar product and the norm in L^2(μ_N). Note that L_N is a negative-definite symmetric operator: for any f, g ∈ L^2(μ_N),

(f, L_N g)_{μ_N} = (L_N f, g)_{μ_N}   and   (f, -L_N f)_{μ_N} ≥ 0.

The following definition is justified by the fact that L_N should approach, in some sense, the continuous operator σ^2 Δ.

Definition 3.2.
Given λ > 0, G ∈ C^∞_c(R^n) and N ∈ N, we define G^λ_N to be the unique element of L^2(μ_N) such that

λ G^λ_N - L_N G^λ_N = H_N   (35)

where H_N is the restriction to V/N of the function H = H(λ) = λ G - σ^2 Δ G ∈ C^∞_c(R^n).

Notice that the introduction of λ > 0 is just an artifice to make λ Id - L_N invertible, where Id is the identity operator, and that λ will be fixed and will play essentially no role in what follows. The idea behind introducing G^λ_N is that L_N G^λ_N is more regular than L_N G (for example, inequality (38) below might fail for a general G). This regularizing procedure is associated with the so-called corrected empirical measure in the literature; see [Gonçalves and Jara(2008)] for more comments on this.
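On a finite truncation of the cloud, the resolvent equation (35) is just a linear system, which may help intuition. The sketch below is only an illustration (finite sample, an arbitrary smooth right-hand side standing in for λG - σ^2 ΔG, no claim about boundary effects); none of the names are from the paper.

import numpy as np

rng = np.random.default_rng(4)
N, lam = 10, 1.0                                       # scaling parameter and the regularizing lambda
V = rng.uniform(-3 * N, 3 * N, size=(800, 2))          # finite stand-in for the cloud V
R = np.exp(-np.linalg.norm(V[:, None, :] - V[None, :, :], axis=2))
np.fill_diagonal(R, 0.0)
LN = N ** 2 * (R - np.diag(R.sum(axis=1)))             # generator of the rescaled walk on V/N
H = lambda z: np.exp(-np.sum(z ** 2, axis=1))          # smooth right-hand side playing the role of H_N
HN = H(V / N)                                          # restriction of H to V/N
G_lam_N = np.linalg.solve(lam * np.eye(len(V)) - LN, HN)    # unique solution of (35) on this finite graph
l2 = lambda f: np.sqrt(np.sum(f ** 2) / N ** 2)        # L^2(mu_N) norm with n = 2
print(l2(G_lam_N), l2(LN @ G_lam_N))                   # both stay bounded, in the spirit of (37)-(38)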
The next result is where homogenization theory enters the game, and in particular the results of [Faggionato(2022a)].

Lemma 3.3. Fix λ > 0. Then for P-a.a. ω and for each G ∈ C^∞_c(R^n) it holds

(G^λ_N, -L_N G^λ_N)_{μ_N} ≤ c(λ, G),   (36)
∥G^λ_N∥_{L^1(μ_N)}, ∥G^λ_N∥_{L^2(μ_N)} ≤ c(λ, G),   (37)
∥L_N G^λ_N∥_{L^1(μ_N)}, ∥L_N G^λ_N∥_{L^2(μ_N)} ≤ c(λ, G),   (38)

where c(λ, G) > 0 is a constant not depending on N. Furthermore,

\lim_{N→∞} ∥G^λ_N - G∥_{L^1(μ_N)} = 0,   (39)
\lim_{N→∞} ∥G^λ_N - G∥_{L^2(μ_N)} = 0.   (40)

Proof. We follow the proof of [Faggionato(2010), Lemma 3.1]. Taking the scalar product with G^λ_N on both sides of (35) and using the Cauchy-Schwarz inequality, we get

λ ∥G^λ_N∥^2_{L^2(μ_N)} + (-L_N G^λ_N, G^λ_N)_{μ_N} = (H_N, G^λ_N)_{μ_N} ≤ ∥G^λ_N∥_{L^2(μ_N)} ∥H_N∥_{L^2(μ_N)}.

Notice that G^λ_N ∈ L^2(μ_N) and that \sup_{N∈N} ∥H_N∥_{L^2(μ_N)} < ∞, so that this is not a trivial inequality. Recalling that (-L_N G^λ_N, G^λ_N) ≥ 0 by negativity of the operator L_N, and noticing that ∥G^λ_N∥_{L^2(μ_N)} appears with a square on the left-hand side, we obtain the L^2 bound in (37). As a consequence we also get (36) and, since L_N G^λ_N = λ G^λ_N - H_N, the L^2 bound in (38). For the L^1 bounds we need the integral representation

G^λ_N(x/N) = \sum_{y ∈ V} \int_0^∞ e^{-λ t} p^N_t(x/N, y/N) H_N(y/N) dt,   x ∈ V,   (41)

where p^N_t(x/N, y/N) is the probability that the random walk on V/N with generator L_N started at x/N is at y/N at time t ≥ 0, for x, y ∈ V. Since by reversibility p^N_t(x/N, y/N) = p^N_t(y/N, x/N),

∥G^λ_N∥_{L^1(μ_N)} ≤ (1/N^n) \sum_{x,y ∈ V} \int_0^∞ e^{-λ t} p^N_t(y/N, x/N) |H_N(y/N)| dt   (42)
 = (1/λ) ∥H_N∥_{L^1(μ_N)} → (1/λ) ∥H∥_{L^1(R^n)} < ∞ as N → ∞,   (43)

where H = λ G - σ^2 Δ G. This proves the first inequality of (37), and the L^1 bound for L_N G^λ_N in (38) follows as before since \sup_{N∈N} ∥H_N∥_{L^1(μ_N)} < ∞. We move to the homogenization results (39) and (40). By Lemma 3.1 we can apply the results of [Faggionato(2022a)]. By [Faggionato(2022a), Theorem 1] we know that G^λ_N → G in the strong sense described in [Faggionato(2022a), Definition 3.11].
As pointed out in [Faggionato(2022a)] (see the discussion after formula (167) therein), if condition (A9) is fulfilled, as in our case, then one also has the L^2 convergence in (40), since G is compactly supported. Finally, we turn our attention to (39). For ℓ > 0, using the Cauchy-Schwarz inequality, the quantity ∥G^λ_N - G∥_{L^1(μ_N)} can be upper bounded by

∥G^λ_N(·) 1{∥·∥ > ℓ}∥_{L^1(μ_N)} + ∥G(·) 1{∥·∥ > ℓ}∥_{L^1(μ_N)} + ( #{x ∈ B_{Nℓ} ∩ V}^{1/2} / N^{n/2} ) ∥G^λ_N - G∥_{L^2(μ_N)}.

The second term is null for ℓ large enough, since G has compact support. The third term goes to 0 as N → ∞, since the fraction converges P-a.s. to a constant while ∥G^λ_N - G∥_{L^2(μ_N)} goes to 0 by (40). It remains to show that

\limsup_{ℓ→∞} \limsup_{N→∞} ∥G^λ_N(·) 1{∥·∥ > ℓ}∥_{L^1(μ_N)} = 0.   (44)

Since H ∈ C^∞_c(R^n), we can find two non-negative functions H^-, H^+ ∈ C^∞_c(R^n) such that -H^- ≤ H ≤ H^+, so that, for their restrictions H^-_N, H^+_N to V_N, it holds -H^-_N ≤ H_N ≤ H^+_N. Call f^λ_N, F^λ_N the solutions in L^2(μ_N) of the equations

λ f^λ_N - L_N f^λ_N = H^-_N,   λ F^λ_N - L_N F^λ_N = H^+_N.

From (41) we derive that f^λ_N, F^λ_N are also non-negative and that -f^λ_N ≤ G^λ_N ≤ F^λ_N on V_N. In particular, in order to prove (44) we can just prove the same statement with f^λ_N and F^λ_N in place of G^λ_N. Therefore, without loss of generality, we can assume that G^λ_N and H are non-negative, the same proof working for G^λ_N and H non-positive. By (35) and an integral representation as in (41), we see that in this case the function G such that H = λ G - σ^2 Δ G is non-negative, too.
The Cauchy-Schwarz inequality yields

∥G^λ_N(·) 1{∥·∥ > ℓ}∥_{L^1(μ_N)} = ∥G^λ_N∥_{L^1(μ_N)} - ∥G^λ_N(·) 1{∥·∥ ≤ ℓ}∥_{L^1(μ_N)}
 ≤ ∥G^λ_N∥_{L^1(μ_N)} - ∥G(·) 1{∥·∥ ≤ ℓ}∥_{L^1(μ_N)} + ∥(G^λ_N(·) - G(·)) 1{∥·∥ ≤ ℓ}∥_{L^1(μ_N)}
 ≤ ∥G^λ_N∥_{L^1(μ_N)} - ∥G(·) 1{∥·∥ ≤ ℓ}∥_{L^1(μ_N)} + ( #{x ∈ B_{Nℓ} ∩ V}^{1/2} / N^{n/2} ) ∥G^λ_N - G∥_{L^2(μ_N)}.

The third summand goes to 0 as N → ∞, as seen before. To handle the first summand, we notice that, since G^λ_N and H are non-negative, the inequality in (42) is in fact an equality. Hence

\limsup_{N→∞} ∥G^λ_N(·) 1{∥·∥ > ℓ}∥_{L^1(μ_N)} = ∥G(·) 1{∥·∥ > ℓ}∥_{L^1(μ_N)},

which is null for ℓ large enough, since G has compact support. □

As a result of having to deal with a non-conservative system, in order to study the hydrodynamic limits we will also have to control the L^2(μ_N) norm of L_N G.

Lemma 3.4. Let G ∈ C^∞_c(R^n) and n ≥ 2. Then, P-a.s.,

\lim_{N→∞} N^{-n} ∥L_N G∥_{L^2(μ_N)} = 0.

Proof. First of all we bound the second moment of ∥L_N G∥_{L^2(μ_N)}. Call S_G the support of G and indicate with N S_G the support blown up by a factor N.
We have

E[ ∥L_N G∥^2_{L^2(μ_N)} ] = E[ N^{-n} \sum_{x ∈ V} ( \sum_{y ∈ V} N^2 r(x, y) ( G(y/N) - G(x/N) ) )^2 ]   (45)
 ≤ 2 N^{4-n} ( (A) + (B) + (C) ),   (46)

where

(A) = E[ \sum_{x ∈ N S_G ∩ V} ( \sum_{y ∈ B_R(x) ∩ V} r(x, y) ( G(y/N) - G(x/N) ) )^2 ],
(B) = E[ \sum_{x ∈ N S_G ∩ V} ( \sum_{y ∈ B^c_R(x) ∩ V} r(x, y) ( G(y/N) - G(x/N) ) )^2 ],
(C) = E[ \sum_{x ∈ (N S_G)^c ∩ V} ( \sum_{y ∈ N S_G ∩ V} r(x, y) G(y/N) )^2 ],

and where B_R(x) is a ball around x of radius R = log(N^n). We proceed by estimating the three parts separately. We can easily deal with part (B) thanks to the Slivnyak-Mecke theorem (see [Moller and Waagepetersen(2003), Theorem 13.3] or [Daley and Vere-Jones(2008), Chapter 13] for more general versions of the theorem), which yields

(B) ≤ ∥G∥^2_∞ \int_{x ∈ N S_G} \int_{y ∉ B_R(x)} ( r(x, y)^2 + r(x, y) \int_{z ∉ B_R(x)} r(x, z) dz ) dy dx ≤ c N^n e^{-R},   (47)

where the factor N^n comes from the size of N S_G and the factor e^{-R} comes from the internal integrals. Developing the square and using the Slivnyak-Mecke theorem again, term (C) becomes

(C) = \int_{x ∉ N S_G} \int_{y ∈ N S_G} r(x, y)^2 G(y/N)^2 dy dx + \int_{x ∉ N S_G} \int_{y ∈ N S_G} \int_{z ∈ N S_G} r(x, y) r(x, z) G(y/N) G(z/N) dy dz dx.   (48)

Since G ∈ C^∞_c(R^n), G must be Lipschitz with Lipschitz constant, say, K > 0. Call d(x, A) the distance between x ∈ R^n and the border of the set A ⊂ R^n. Noticing that ∥x - y∥ ≥ d(x, N S_G) + d(y, N S_G) if x ∉ N S_G and y ∈ N S_G, we see that the first double integral on the right-hand side of (48) is smaller than

\int_{x ∉ N S_G} \int_{y ∈ N S_G} e^{-2(d(x, N S_G) + d(y, N S_G))} ( K d(y, N S_G) / N )^2 dy dx ≤ c_1 N^{-2} \int_{x ∉ N S_G} e^{-2 d(x, N S_G)} N^{n-1} dx ≤ c_2 N^{n-3},

with c_1, c_2 > 0 constants that depend on G. Regarding the triple integral on the right-hand side
of (48), we can do something similar and bound it by

\int_{x ∉ N S_G} e^{-2 d(x, N S_G)} ( \int_{y ∈ N S_G} e^{-2 d(y, N S_G)} ( K d(y, N S_G) / N ) dy )^2 dx ≤ c N^{2n-4},

with c > 0 a constant depending on G. Plugging these last two bounds back into (48) we get

(C) ≤ c N^{2n-4}.   (49)

Finally we turn our attention to (A). We use the Slivnyak-Mecke theorem once more and a first-order Taylor approximation and obtain

(A) = \int_{x ∈ N S_G} E[ ( \sum_{y ∈ B_R(x) ∩ V} r(x, y) ( \sum_{i=1,...,n} ((y_i - x_i)/N) ∂_{x_i} G(x/N) + O(∥x - y∥^2 / N^2) ) )^2 ]
 ≤ c_1 N^{-2} ∥∇G∥^2_∞ \int_{x ∈ N S_G} E[ ( \sum_{y ∈ B_R(x) ∩ V} r(x, y) ( ∥x - y∥ + ∥x - y∥^2 / N ) )^2 ] dx
 ≤ c_2 N^{-2} ∥∇G∥^2_∞ \int_{x ∈ N S_G} U(x, R) dx ≤ c_3 N^{n-2},   (50)

where we have used the fact that

U(x, R) := \int_{y ∈ B_R(x)} ( r(x, y)^2 ∥x - y∥^2 + \int_{z ∈ B_R(x)} r(x, y) r(x, z) ∥x - y∥ ∥x - z∥ dz ) dy ≤ c

for some c > 0. We finally put (47), (49) and (50) back into (46) to obtain that E[ ∥L_N G∥^2_{L^2(μ_N)} ] ≤ c N^n. By the Markov inequality we now obtain that, for all ε > 0,

P( N^{-n} ∥L_N G∥_{L^2(μ_N)} > ε ) ≤ c ε^{-2} N^{-n},

which tells us that the sequence N^{-n} ∥L_N G∥_{L^2(μ_N)} converges almost completely to 0 for n ≥ 2 and hence almost surely. □

4. A non-conservative Kipnis-Varadhan estimate

Recall the Domination & Convergence Assumption and in particular (6). For constants ρ > 0 and M ∈ N_0, call ν_{M,ρ}(·) = ν_{M,ρ}(ω, ·) the measure that dominates all initial conditions. That is, ν_{M,ρ} is the product measure on N^V whose restriction to each site x ∈ V is a Poisson random variable of parameter ρ plus the constant M ∈ N:

ν_{M,ρ}( \prod_{x ∈ A} [M + n_x, ∞) ) = \prod_{x ∈ A} ( \sum_{j=n_x}^{∞} ρ^j e^{-ρ} / j! )   for all A ⊂ V, (n_x)_{x∈A} ∈ N^{|A|}.   (51)
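Concretely (a toy illustration with made-up values, not from the paper), one draw from ν_{M,ρ} is obtained site by site as M plus an independent Poisson(ρ) variable, so every site carries at least M particles:

import numpy as np

rng = np.random.default_rng(5)
M, rho, num_sites = 2, 0.5, 10
eta0 = M + rng.poisson(rho, size=num_sites)   # one sample of nu_{M,rho} on a toy vertex set
print(eta0, bool(eta0.min() >= M))            # every site has at least M particles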
Lemma 4.1. Consider an initial condition given by ν_{0,ρ}, the product of Poisson random variables of parameter ρ > 0. Under P^ω_{ν_{0,ρ}}, let each particle perform an independent random walk on V/N with generator L_N (without births nor deaths) and call (Y_t)_{t≥0} the evolution of their configuration, so that Y_t(x) is the number of particles at x ∈ V at time t. Let H be a nonnegative function on V/N belonging to L^1(μ_N) ∩ L^2(μ_N) and such that L_N H belongs to L^2(μ_N). Then for any T, A > 0 it holds

P^ω_{ν_{0,ρ}}( \sup_{0≤t≤T} N^{-n} \sum_{x ∈ V} Y_t(x/N) H(x/N) > A ) ≤ c(ρ, T) A^{-1} |||H|||_N,   (52)
P^ω_{ν_{0,ρ}}( \sup_{0≤t≤T} N^{-n} \sum_{x ∈ V} Y_t(x/N)^2 H(x/N) > A ) ≤ \tilde c(ρ, T) A^{-1} ( |||H|||^2_N + N^{2-2n} \sum_{x ∈ V} r(x) H(x/N)^2 )^{1/2},   (53)

with c(ρ, T) = (ρ^2 + ρ + Tρ)^{1/2}, \tilde c(ρ, T)^2 a polynomial in ρ and T, and

|||H|||^2_N := ∥H∥^2_{L^1(μ_N)} + N^{-n} ∥H∥_{L^2(μ_N)} ∥L_N H∥_{L^2(μ_N)}.   (54)

Remark 4.2. This sort of inequality is typically carried out for all powers Y^k_t of the number of particles at once, at the only cost of a constant on the right-hand side varying with k; see for example [Faggionato(2010), Lemma 3.2]. In our setting, though, we cannot hope for such a "clean" result for all values of k, due to the irregularity of the support V = V(ω). In the rest of the paper we only need k = 1, but we also bound the case k = 2 here for future interest.

Proof of Lemma 4.1.
The particle dynamics without births or deaths is reversible with respect to ν_{0,ρ}. Hence, by the Kipnis-Varadhan inequality ([Kipnis and Varadhan(1986)], see also [Kipnis and Landim(1998), Theorem 11.1 in Appendix 1]) we know that, for k ≥ 1,

P^ω_{ν_{0,ρ}}( \sup_{0≤t≤T} N^{-n} \sum_{x ∈ V} Y_t(x/N)^k H(x/N) > A ) ≤ (e/A) ( ⟨g, g⟩_{ν_{0,ρ}} + T ⟨g, -N^2 L^* g⟩_{ν_{0,ρ}} )^{1/2}   (55)

where N^2 L^* is the generator of (Y_t)_{t≥0} and g : N^V → R is given by

g(η) := N^{-n} \sum_{x ∈ V} g_x(η) H(x/N),   g_x(η) := η(x)^k.

Notice that L^* corresponds to the L appearing in (2) with b = d = 0. Now we calculate

⟨g, g⟩_{ν_{0,ρ}} = N^{-2n} \sum_{x,y ∈ V} H(x/N) H(y/N) ν_{0,ρ}[g_x g_y] ≤ c_0(ρ, k) ∥H∥^2_{L^1(μ_N)}   (56)

where c_0(ρ, k) = E[ξ_ρ^{2k}] indicates the 2k-th moment of ξ_ρ ∼ Poisson(ρ). Moving to the second summand under the root in (55), we write

⟨g, -N^2 L^* g⟩_{ν_{0,ρ}} = -N^{2-2n} \sum_{x,y ∈ V} H(x/N) H(y/N) ν_{0,ρ}[g_x L^* g_y].   (57)

Besides, we have

L^* g_y(η) = η(y) r(y) ( (η(y) - 1)^k - η(y)^k ) + \sum_{z ∈ V} η(z) r(z, y) ( (η(y) + 1)^k - η(y)^k ).

For x = y, we get ν_{0,ρ}[g_x L^* g_x] = c_1(ρ, k) r(x) with

c_1(ρ, k) = E[ ξ_ρ^{k+1} ( (ξ_ρ - 1)^k - ξ_ρ^k ) ] + E[ξ_ρ] E[ ξ_ρ^k ( (ξ_ρ + 1)^k - ξ_ρ^k ) ] ≤ 0.

For x ≠ y, using that L^* g_y(η) - η(x) r(x, y)( (η(y) + 1)^k - η(y)^k ) is independent of η(x) under ν_{0,ρ} and that ν_{0,ρ}[L^* f] = 0 for all f,

ν_{0,ρ}[g_x L^* g_y] = ν_{0,ρ}[ η(x)^k · η(x) r(x, y) ( (η(y) + 1)^k - η(y)^k ) ] + ν_{0,ρ}[η^k] ν_{0,ρ}[ L^* g_y - η(x) r(x, y) ( (η(y) + 1)^k - η(y)^k ) ] = c_2(ρ, k) r(x, y),

with c_2(ρ, k) = ( E[ξ_ρ^{k+1}] - E[ξ_ρ^k] E[ξ_ρ] ) E[ (ξ_ρ + 1)^k - ξ_ρ^k ].
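As a quick sanity check of these constants (an added aside, not in the original text): for k = 1, using E[ξ_ρ] = ρ and E[ξ_ρ^2] = ρ^2 + ρ, one computes

c_1(ρ, 1) = E[ξ_ρ^2((ξ_ρ - 1) - ξ_ρ)] + E[ξ_ρ] E[ξ_ρ((ξ_ρ + 1) - ξ_ρ)] = -E[ξ_ρ^2] + E[ξ_ρ]^2 = -ρ,
c_2(ρ, 1) = (E[ξ_ρ^2] - E[ξ_ρ]^2) E[(ξ_ρ + 1) - ξ_ρ] = Var(ξ_ρ) = ρ,

which is consistent with the cancellation exploited in the next step.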
Putting this and (56) back into (55), together with the fact that c_0(ρ, 1) = E[ξ_ρ²] = ρ² + ρ, gives (52). When k = 2, explicit calculations yield c_1(ρ, 2) = −ρ(4ρ² + 8ρ + 1) and c_2(ρ, 2) = 4ρ³ + 4ρ² + ρ. They do not cancel out as in the case k = 1 and, as a consequence, an additional term appears from ⟨g, −L*g⟩_{ν0,ρ}, namely
\[
\langle g, -L^{*} g\rangle_{\nu_{0,\rho}} \;\le\; c_2(\rho,k)\, N^{-n} \|H\|_{L^{2}(\mu_N)} \|L_N H\|_{L^{2}(\mu_N)} + \big| c_1(\rho,k) + c_2(\rho,k) \big|\, R
\]
with R = N^{2−2n} Σ_{x∈V} r(x) H(x/N)². Putting the pieces together as before, we obtain (53). □

Let us turn to the non-conservative case.

Lemma 4.3. Let η^N_0 be an initial distribution of particles whose law is dominated by ν_{M,ρ} for some M ∈ N0 and ρ ≥ 0 in the sense of (6). Let H be a nonnegative function on V/N belonging to L¹(µ_N) and L²(µ_N). Then there exist a constant c₁ = c(M, ρ, T) > 0 and an absolute constant c₂ > 0 such that
\[
P^{\omega}\Big( \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} \eta^{N}_{t}(x)\, H(x/N) > A \Big) \;\le\; A^{-1}\, c_1\, e^{c_2 b T}\, |||H|||_{N} \tag{58}
\]
for all A > 0, where |||H|||_N is defined in (54).

Proof. The probability appearing in (58) can clearly be upper bounded by the probability of the same event starting with a configuration sampled from ν_{M,ρ}. As a first step, we would like to further bound the initial condition in order to have a pure product of a Poisson number of particles per site, which will allow us to use the result of Lemma 4.1 in the following. To this end we first focus on the case M = 1, ρ = 0.
In this case the initial condition ν1,0 is given by a single particle on each site of V. Take two random variables X, Y ∼ Poisson(log 2), coupled so that P(X + Y ≥ 1) = 1 (this is possible since P(X = 0) = P(Y = 0) = 1/2, so the events {X = 0} and {Y = 0} can be made disjoint). We can dominate ν1,0 by the random initial condition ν̄ given by the following: the number of particles on site x ∈ V is X(x) + Y(x), with X(x) ∼ X and Y(x) ∼ Y and (X(x), Y(x))_{x∈V} independent for different x ∈ V. Now we notice that if we want N^{−n} Σ_{x∈V} η^N_t(x) H(x/N) to be greater than A, then either N^{−n} Σ_{x∈V} η^{N,X}_t(x) H(x/N) must be larger than A/2, where η^{N,X}_t denotes the particles descending from the initial particles "of type X", or N^{−n} Σ_{x∈V} η^{N,Y}_t(x) H(x/N) must be greater than A/2. So with a union bound we get
\[
P^{\omega}_{\nu_{1,0}}\Big( \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} \eta^{N}_{t}(x) H(\tfrac{x}{N}) > A \Big) \;\le\; 2\, P^{\omega}_{\nu_{0,\log 2}}\Big( \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} \eta^{N}_{t}(x) H(\tfrac{x}{N}) > A/2 \Big) .
\]
It is straightforward to generalize the previous argument to the case M ≥ 1 and ρ ≥ 0, which yields
\[
P^{\omega}_{\nu_{M,\rho}}\Big( \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} \eta^{N}_{t}(x) H(\tfrac{x}{N}) > A \Big) \;\le\; (M+1)\, P^{\omega}_{\nu_{0,\rho\vee\log 2}}\Big( \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} \eta^{N}_{t}(x) H(\tfrac{x}{N}) > \tfrac{A}{M+1} \Big) .
\]
From this we see that, at the cost of a constant factor depending on M, we can prove (58) with initial particle configuration ν0,ρ, where we have replaced the original ρ with ρ ∨ log 2.

We use a new labelling notation for the particles, not to be confused with the one appearing in Section 2.1. The individuals at time 0 are labelled by N. To label their descendants, we introduce the binary tree J = ∪_{k∈N0} {1, 2}^k.
For j = (j1, . . . , jk) ∈ J, k ∈ N0 and n ∈ N, we write (n, j) = (n, j1, . . . , jk). In particular, (n, j) = (n, j1, . . . , jk) is an individual of generation |j| = k. When a particle (n, j) ∈ N × {1, 2}^k reproduces, it disappears and generates the particles (n, j1, . . . , jk, 1) and (n, j1, . . . , jk, 2). For a subset A = I × J with I ⊂ N and J ⊂ J, we write (η^{N,A}_t) for the process restricted to the subset of particles labelled by elements of A, that is, at time t we look at η^N_t and ignore all the particles with labels not belonging to A. Since η^N_t = Σ_{j∈J} η^{N,N×{j}}_t we have
\[
\Sigma^{N}_{T} := \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} \eta^{N}_{t}(x) H(x/N) \;\le\; \sum_{j\in J} \Sigma^{N,j}_{T},
\qquad \text{where} \qquad
\Sigma^{N,j}_{T} := \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} \eta^{N,\,N\times\{j\}}_{t}(x) H(x/N) .
\]
Using that Σ_{j∈J} 4^{−|j|} = Σ_{k≥0} 2^k 4^{−k} = 2, we can bound
\[
P^{\omega}_{\nu_{0,\rho}}\big( \Sigma^{N}_{T} \ge A \big) \;\le\; P^{\omega}_{\nu_{0,\rho}}\Big( \bigcup_{j\in J} \big\{ \Sigma^{N,j}_{T} \ge 4^{-|j|} A/2 \big\} \Big) \;\le\; \sum_{j\in J} P^{\omega}_{\nu_{0,\rho}}\big( \Sigma^{N,j}_{T} \ge 4^{-|j|} A/2 \big) . \tag{59}
\]
The key point is now to see that the process (η^{N,N×{j}}_t) can be dominated by another process (Y^{N,j}_t), obtained by a percolation procedure on the initial distribution of particles.
More precisely, Y^{N,j}_0 is obtained from η^N_0 as follows: for ℓ ∈ N, the particle with label ℓ in η^N_0 is kept in Y^{N,j}_0 only if particle (ℓ, j) is born before time T for the process (η^N_t). Notice that this happens with probability
\[
p_j = P\big( \mathrm{Poisson}(bT) \ge |j| \big) = e^{-bT} \sum_{k\ge |j|} \frac{(bT)^{k}}{k!} \tag{60}
\]
since, along each lineage, birth events follow a Poisson process with intensity b. If present in Y^{N,j}_0, then, particle ℓ evolves in the process (Y^{N,j}_t) by following the trajectory of (ℓ, j) and its ancestors in (η^N_t); once (ℓ, j) has disappeared in (η^N_t), the particle continues to evolve following the trajectory of any lineage of descendants of (ℓ, j). From this coupling, it is clear that, for all t ∈ [0, T] and j ∈ J, η^{N,N×{j}}_t ≤ Y^{N,j}_t, and we have obtained
\[
P^{\omega}_{\nu_{0,\rho}}\big( \Sigma^{N,j}_{T} \ge 4^{-|j|} A/2 \big) \;\le\; P^{\omega}_{\nu_{0,\rho}}\Big( \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V} Y^{N,j}_{t}(x) H(x/N) > 4^{-|j|} A/2 \Big) .
\]
At this point, we can use Lemma 4.1 for the process (Ỹ^{N,j}_t)_{t∈[0,T]} on V/N, with Ỹ^{N,j}_t(x/N) = Y^{N,j}_t(x) for all x ∈ V and t ∈ [0, T], since for this process the present particles just perform independent random walks on V/N generated by L_N. We notice furthermore that the initial particles of the process (Y^{N,j}_t) have distribution ν_{0, ρ p_j}, cfr. (60). Hence
\[
P^{\omega}_{\nu_{0,\rho}}\Big( \sup_{0\le t\le T} \frac{1}{N^{n}} \sum_{x\in V_N} Y^{N,j}_{t}(x) H(x/N) > 4^{-|j|} A/2 \Big) \;\le\; 4^{|j|}\, 2\, A^{-1}\, c(\rho p_j, T)\, |||H|||_{N}
\]
where the function c(·, ·) is the same appearing in Lemma 4.1. Going back to (59) we have obtained
\[
P^{\omega}_{\nu_{0,\rho}}\big( \Sigma^{N}_{T} \ge A \big) \;\le\; \sum_{j\in J} 4^{|j|}\, 2\, A^{-1}\, c(\rho p_j, T)\, |||H|||_{N} \;\le\; \bar c(\rho, T)\, A^{-1}\, |||H|||_{N} \sum_{j\in J} 4^{|j|}\, p_j^{1/2} . \tag{61}
\]
Recall that if X ∼ Poisson(λ) one has the bound P(X ≥ t) ≤ e^{−λ}(eλ/t)^{t} for all t > λ (see for example [Vershynin(2018), Exercise 2.3.3]).
Using also that the number of j's of length ℓ is 2^ℓ, we set ℓ̄ = ⌈81 e bT⌉ and compute
\[
\sum_{j\in J} 4^{|j|} p_j^{1/2} \;\le\; \sum_{\ell=0}^{\bar\ell} 8^{\ell} + \sum_{\ell=\bar\ell+1}^{\infty} 8^{\ell} \Big( \frac{ebT}{\ell} \Big)^{\ell/2} \;\le\; c\, e^{c_2 b T}
\]
with c, c₂ > 0 absolute constants, which together with (61) yields the result of the lemma. □

5. Proof of Theorem 1.2

As in Section 2, throughout the whole section we fix some realization of the underlying graph ω ∈ Ω sampled according to the measure P. All the processes in what follows will evolve under the measure P^ω, and all the claims are to be understood as holding P–almost surely.

5.1. An L² martingale. In this section we will pave the way for the proof of tightness of the sequence of processes ((⟨π^N_t, G⟩)_{t∈[0,T]})_N and for the identification of the limit. For G ∈ C^∞_c(R^n) let us define the process
\[
M^{N}_{t} = M^{N}_{t}(G^{\lambda}_{N}) := \langle \pi^{N}_{t}, G^{\lambda}_{N}\rangle - \langle \pi^{N}_{0}, G^{\lambda}_{N}\rangle - \int_{0}^{t} \langle \pi^{N}_{s}, L_N G^{\lambda}_{N} + b\, G^{\lambda}_{N}\rangle\, ds . \tag{62}
\]
By Lemma 3.3 and Lemma 4.3, we know that M^N is almost surely well defined when starting from some η^N_0 satisfying (6). We aim at proving the following result:
Lemma 5.1. Consider a sequence of initial configurations (η^N_0)_{N∈N} satisfying the Domination & Convergence Assumption. For all ε > 0 and for all G ∈ C^∞_c(R^n) it holds
\[
\lim_{N\to\infty} P^{\omega}\Big( \sup_{0\le t\le T} \big| M^{N}_{t} \big| \ge \varepsilon \Big) = 0 .
\]
In fact, we will not only show Lemma 5.1, but also that M^N is a square integrable martingale which converges in L² to 0 and obtain a speed of convergence, see next Lemma 5.2. To do so, we will use a truncation argument and exploit the results already obtained in Section 2.3 while constructing the process. More precisely, for any a ∈ N, consider the process (π^{N,a}_t)_{t∈[0,T]} (and the corresponding (η^{N,a}_t)_{t∈[0,T]}) where the initial configuration of particles is truncated outside the box [−a, a]^n, that is, only the particles in the finite set V ∩ [−a, a]^n are retained for the initial configuration and all the others are deleted. By (16), we know that all the particles of the process π^{N,a} a.s. remain in a finite box during the time interval [0, T] (recall that π̃^{B_N}_t was the process restricted to a box of size B_N and that, for finite initial conditions, π^N_t was obtained as the restriction to the second coordinate of the limit for N → ∞ of π̃^{B_N}_t, cfr. Corollary 2.4 and (13)).
It follows that the number of births and of jumps is a.s. finite in a finite time interval and therefore the following equation holds for any locally bounded function H:
\[
\langle \pi^{N,a}_{t}, H\rangle = \langle \pi^{N,a}_{0}, H\rangle + \frac{1}{N^{n}} \int_{0}^{t}\!\! \int_{\mathbb{R}_{+}} \sum_{x,y\in V} \mathbf{1}_{\{u\le \eta^{N,a}_{s}(x) N^{2} r(x,y)\}} \big( H(y/N) - H(x/N) \big)\, \mathcal{N}_{x,y}(ds, du) + \frac{1}{N^{n}} \int_{0}^{t}\!\! \int_{\mathbb{R}_{+}} \sum_{x\in V} \mathbf{1}_{\{u\le b\, \eta^{N,a}_{s}(x)\}}\, H(x/N)\, \mathcal{Q}_{x}(ds, du) . \tag{63}
\]
Notice that we have adopted here a slightly different description of the process for convenience. The underlying Poisson point processes are indexed by sites and not by individuals as before. That is, measures N_{x,y} and Q_x with intensity ds du on R²_+ are replacing the previous N^{X^i_s, y}_i and N^b_i. Equation (63) can be rewritten as
\[
\langle \pi^{N,a}_{t}, H\rangle = \langle \pi^{N,a}_{0}, H\rangle + \int_{0}^{t} \langle \pi^{N,a}_{s}, L_N H + b H\rangle\, ds + M^{N,a}_{t}(H), \tag{64}
\]
where M^{N,a}(H) is defined by
\[
M^{N,a}_{t}(H) = \frac{1}{N^{n}} \int_{0}^{t}\!\! \int_{\mathbb{R}_{+}} \sum_{x,y\in V} \mathbf{1}_{\{u\le \eta^{N,a}_{s}(x) N^{2} r(x,y)\}} \big( H(y/N) - H(x/N) \big)\, \widetilde{\mathcal{N}}_{x,y}(ds, du) + \frac{1}{N^{n}} \int_{0}^{t}\!\! \int_{\mathbb{R}_{+}} \sum_{x\in V} \mathbf{1}_{\{u\le b\, \eta^{N,a}_{s}(x)\}}\, H(x/N)\, \widetilde{\mathcal{Q}}_{x}(ds, du),
\]
and Ñ_{x,y} and Q̃_x are the compensated measures of N_{x,y} and Q_x.

We turn our attention to H = G^λ_N. On the one hand, ⟨π^{N,a}_t, G^λ_N⟩ increases a.s. as a → ∞ to ⟨π^N_t, G^λ_N⟩, which is a.s. finite (using for example (58) and Lemma 3.3). On the other hand, the fact that L_N G^λ_N = λ G^λ_N − H_N (cfr. (35)) and (58) ensure that ∫_0^t ⟨π^N_s, |L_N G^λ_N| + b G^λ_N⟩ ds < ∞ a.s., and it follows by bounded convergence that, a.s.,
\[
\lim_{a\to\infty} \int_{0}^{t} \langle \pi^{N,a}_{s}, L_N G^{\lambda}_{N} + b\, G^{\lambda}_{N}\rangle\, ds = \int_{0}^{t} \langle \pi^{N}_{s}, L_N G^{\lambda}_{N} + b\, G^{\lambda}_{N}\rangle\, ds .
\]
We obtain from (64) that for any t ≥ 0, M^{N,a}_t = M^{N,a}_t(G^λ_N) converges a.s. as a → ∞ to M^N_t, which is given by (74) and is a.s. finite. To wrap up, we have defined a càdlàg process (M^N_t)_{t∈[0,T]} satisfying identity (74) and such that, for any t, M^N_t is the a.s. limit of M^{N,a}_t, defined as an integral against compensated jump measures. Let us now check that these processes are also square integrable martingales and that they tend to 0 in L² and in probability as N → ∞. This in particular implies Lemma 5.1.

Lemma 5.2. For any N ≥ 1 and a > 0, M^{N,a} and M^N are càdlàg square integrable martingales and, for any T > 0,
\[
E^{\omega}\Big[ \sup_{t\le T} \big( M^{N,a}_{t} - M^{N}_{t} \big)^{2} \Big] \xrightarrow[a\to\infty]{} 0 .
\]
Furthermore, for any a > 0 and N ≥ 1,
\[
E^{\omega}\Big[ \sup_{t\le T} \big( M^{N,a}_{t} \big)^{2} \Big] + E^{\omega}\Big[ \sup_{t\le T} \big( M^{N}_{t} \big)^{2} \Big] \;\le\; \frac{C_T}{N^{n}} \tag{65}
\]
for some constant C_T which only depends on T.

Proof. We first prove that M^{N,a} = M^{N,a}(G^λ_N) is a square integrable martingale.
Its quadratic variation is
\[
\langle M^{N,a}\rangle_{t} = \frac{N^{2}}{N^{2n}} \int_{0}^{t} \sum_{x,y\in V} \eta^{N,a}_{s}(x)\, r(x,y)\, \big( G^{\lambda}_{N}(y/N) - G^{\lambda}_{N}(x/N) \big)^{2} ds + \frac{1}{N^{2n}} \int_{0}^{t} \sum_{x\in V} b\, \eta^{N,a}_{s}(x)\, G^{\lambda}_{N}(x/N)^{2}\, ds .
\]
Since E^ω[η^{N,a}_s(x)] ≤ E^ω[η^N_s(x)] ≤ C e^{bs} (cfr. equation (31), and recall that η_s is the projection on alive a–particles for Z_s) we get
\[
E^{\omega}\big[ \langle M^{N,a}\rangle_{t} \big] \;\le\; C' e^{bt} \Big( \frac{N^{2}}{N^{2n}} \sum_{x,y\in V} r(x,y)\, \big( G^{\lambda}_{N}(y/N) - G^{\lambda}_{N}(x/N) \big)^{2} + \frac{1}{N^{2n}} \sum_{x\in V} b\, G^{\lambda}_{N}(x/N)^{2} \Big) . \tag{66}
\]
Rewriting
\[
r(x,y)\, \big( G^{\lambda}_{N}(y/N) - G^{\lambda}_{N}(x/N) \big)^{2} = -r(x,y)\, G^{\lambda}_{N}(x/N)\, \big( G^{\lambda}_{N}(y/N) - G^{\lambda}_{N}(x/N) \big) - r(y,x)\, G^{\lambda}_{N}(y/N)\, \big( G^{\lambda}_{N}(x/N) - G^{\lambda}_{N}(y/N) \big)
\]
we obtain
\[
E^{\omega}\big[ \langle M^{N,a}\rangle_{t} \big] \;\le\; \frac{C'' e^{2bt}}{N^{n}} \Big( \big( G^{\lambda}_{N}, -L_N G^{\lambda}_{N} \big)_{\mu_N} + \| G^{\lambda}_{N} \|^{2}_{L^{2}(\mu_N)} \Big) \tag{67}
\]
which is finite by (36) and (37). It follows that M^{N,a} is a square integrable martingale and, using Doob's inequality, we also obtain the relative L² bound appearing in (65). We now prove by a Cauchy criterion that M^{N,a} converges to some right-continuous square integrable martingale, since the space of L² right-continuous martingales is complete (see e.g. [Ikeda and Watanabe(1989), Lemma 2.1]). By uniqueness, this limit will then have to be M^N. Notice that (65) will automatically follow, since the L² bound for M^N can be derived from that of M^{N,a} by taking the limit. More precisely, let a < a′. Then M^{N,a′}_t − M^{N,a}_t = (1/N^n) ∫_0^t ∫_{R+} Σ_{x,y∈V} 1{η^{N,a}_s …

…7 in order to construct initial conditions consistent with random large-scale structures. The magnetic-field components Bx and By are then computed by straightforward derivatives.
Finally, to explore a regime of strongly perturbed field lines, we fix the amplitude of the fluctuations to be ⟨B⊥⟩/B0 ∼ 1, where ⟨B⊥⟩ is the root-mean-square value of the in-plane fluctuations. This choice leads to a broader particle energy distribution, while in Nättilä & Beloborodov (2022) the authors showed that when the amplitude is small, the particle energy distribution is quasi-thermal.

Figure 1. Initial and final values at time t = 2 tA of the normalized joint PDFs of the temperature ratio T and of the total β parameter, βtot := βe + βp. The data refer to three representative simulations with initial temperature ratio T0 = 0.1, 1.0 and 10. The inset shows the 90% contour lines of the joint PDFs, while the circles mark the maxima of each PDF. Note that all PDFs converge to the same final area in the (T, βtot) plane despite the very different initial data.

Other quantities that will be referred to in the rest of the paper are the Alfvén crossing time tA := L/vA, where vA := c √(σ/(1 + σ)) is the Alfvén speed. The plasma magnetization is instead defined as σ := B0²/(4πw), where w is the enthalpy density of the plasma, w := (ρe + ρp)c² + Γe εe + Γp εp, with ρe,p and εe,p being, respectively, the rest-mass densities and the internal energy densities of electrons and protons when following an ideal-fluid equation of state (Rezzolla & Zanotti 2013).

As the simulation proceeds, turbulent magnetic reconnection takes place, leading to a nonlinear change in magnetic topology and converting magnetic energy into kinetic and internal energy. This process strongly affects the dynamics of the plasma on all the scales we could reproduce with our simulations. This highly dynamical system evolves with magnetic flux ropes moving, colliding, and sometimes repelling each other depending on the magnetic-field polarity. This dynamics proceeds until a stationary state is achieved after about an Alfvén crossing time (see also the top panels of Fig. 2 and the Appendix for a detailed discussion).

3. RESULTS

At the initial time, after fixing β and σ for each simulation, we set the temperatures, which are uniform for both species. In particular, we first specify the proton-βp parameter and then obtain the electron temperature so as to have a specific initial temperature ratio T0 := Te,0/Tp,0. At any time during the simulation we measure the spatial distributions in the (x, y) plane of βtot := βe + βp and T, from which we compute the joint PDFs for the two quantities, namely, (βtot, T). The temperature for each species is computed from Tα := pα/(nα kB), where pα is the isotropic pressure, i.e., pα := (1/3)(pα^xx + pα^yy + pα^zz) and pα^ij is the pressure tensor.

This is shown in Fig. 1, where we report the joint PDFs at the initial (t = 0) and final (t = 2 tA) times for three representative simulations initialized respectively with T0 = 0.1, 1.0, and 10. Clearly, the three initial setups have different joint PDFs, narrowly distributed around the three initial values of the temperature ratio T0. Interestingly, however, at the final time they have all converged to the same equilibrium distribution, irrespective of the initial data.
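As a side note on how these diagnostics would be evaluated in practice, the following sketch (not part of the original paper) computes the quantities just defined from gridded particle moments; it assumes Gaussian units, the standard per-species definition βα = 8π pα/B0², and illustrative array names for the moments:

import numpy as np

def species_temperature(pxx, pyy, pzz, n, kB=1.0):
    # T_alpha = p_alpha / (n_alpha * kB), with p_alpha the isotropic pressure
    p_iso = (pxx + pyy + pzz) / 3.0
    return p_iso / (n * kB), p_iso

def total_beta(p_iso_e, p_iso_p, B0):
    # beta_tot = beta_e + beta_p, assuming beta_alpha = 8*pi*p_alpha / B0^2
    return 8.0 * np.pi * (p_iso_e + p_iso_p) / B0**2

def alfven_time(L, B0, w, c=1.0):
    # sigma = B0^2 / (4*pi*w), v_A = c*sqrt(sigma/(1+sigma)), t_A = L / v_A
    sigma = B0**2 / (4.0 * np.pi * w)
    v_A = c * np.sqrt(sigma / (1.0 + sigma))
    return L / v_A

With the per-species pressures and densities available on the grid, the temperature ratio T = Te/Tp and βtot then follow pointwise, which is exactly what the joint PDFs of Fig. 1 are built from.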
This convergence can be best appreciated in the inset of Fig. 1, which reports a zoom-in of the central region of the final distributions, with the color-coded contour reporting the 90%-value for each simulation, while the circles represent the maximum of each joint PDF. This convergence has been verified to take place for four different values of the initial temperature ratio (T0 = 0.01, 0.1, 1.0 and 10.0), while keeping σ = 0.3 and β = 3 × 10⁻⁴.

The behaviour in Fig. 1 leads us to conjecture that the choice of the initial temperature ratio T0 is effectively unimportant, at least in the ranges explored here¹, as its memory is lost by the time the system has reached a steady state. In view of this, we set T0 = 1.0 for the 35 simulations performed varying σ and β (note that with such an initial temperature ratio, the plasma-β parameter is the same for electrons and protons, i.e., βe = βp =: β). The ranges of σ and β explored are compatible with previous kinetic studies, state-of-the-art GRMHD simulations, and radiative-transfer calculations (Ball et al. 2018; Cruz-Osorio et al. 2022; Fromm et al. 2022). As also noted by Pecora et al. (2019), higher values of β would require a much higher number of particles to counter the statistical noise, making purely PIC calculations of this type computationally expensive with modern resources.

¹ A word of caution: we have shown the initial temperature to be irrelevant once turbulence is developed for a specific set of initial values of β and σ. Given the physical arguments given above, extending this conclusion to different initial values is a conjecture that is reasonable but challenging to prove, especially for β ∼ 1.

Figure 2 provides a very compact but powerful overview of the fully developed turbulent state for a simulation with σ = 1.0 and β = 3 × 10⁻³, at time t = 1.5 tA. Each upper panel is split into two regions reporting different plasma properties. Panel (a) shows the electron number density ne normalized to the initial number of particles per cell n0 (left), and the magnetization σ (right). Panel (b), instead, reports the temperature ratio T (left) and the out-of-plane electric-current density Jz (right). Note how, in analogy to nonrelativistic kinetic simulations, vortex-like and sheet-like structures corresponding to magnetic flux tubes are present at all the scales that are resolved in the simulation (Servidio et al. 2012; Comisso & Sironi 2018; Parashar et al. 2018; Pecora et al. 2019). High number-density "magnetic islands" can be found in large-scale flux tubes and, in general, the density is larger in these coherent quasi-circular structures.

At the same time, the largest temperatures (and temperature ratios) are not achieved at the center of the islands, which are instead comparatively cooler. This is because the temperature is higher between flux tubes, where reconnection layers lead to the formation of plasmoids within narrow current sheets (Servidio et al. 2009; Comisso & Sironi 2018; Pezzi et al. 2021). Elongated unstable current sheets tend to fragment into chains of plasmoids, and small-size current sheets appear on a wide range of scales (Hellinger et al. 2015; Dong et al. 2018; Huang & Bhattacharjee 2016). Notice also that the out-of-plane electric-current density Jz shows a variety of current sheets of different sizes.
Some of these current layers break into smaller plasmoids, and these regions are important for the heating of the plasma and the acceleration of the particles.

The various quantities shown in Fig. 2 are overlaid with the trajectories of some of the most energized particles that we tracked (protons in the left panels and electrons in the right ones). In particular, we track a randomly chosen sample of 500 electrons and 500 protons during the whole simulation. The starting position of each particle is marked with a star. Note how, quite generically, and in addition to the basic gyrations at the corresponding Larmor radii, there are particles that have closed orbits as they are trapped in a flux rope, while others experience turnovers that suddenly bend the trajectory, similarly to what is observed in nonrelativistic turbulence simulations (Pecora et al. 2018)². Overall, when a particle experiences a reconnection process and is accelerated, it abruptly increases its Larmor radius, but also its Lorentz factor γ and its kinetic energy.

In the lower panels (c) and (d) of Fig. 2 we show the evolution of the Lorentz factor of the particles tracked in the upper panels (a) and (b), with protons being reported in panel (c) and electrons in panel (d). As expected, and as shown by the different vertical scales of panels (c) and (d), electrons experience considerably larger accelerations when compared to protons. This is simply due to the different masses of the two species: electrons, which have a smaller Larmor radius, are more efficiently accelerated by the thin current sheets where magnetic reconnection takes place. This stochastic acceleration mechanism of multi-reconnection events is very efficient and commonly observed in astrophysical plasma turbulence (Drake et al. 2009; Haynes et al. 2014).

² When the turbulence is fully developed, the velocity distribution of the electrons is highly nonthermal and their Larmor radius is significantly larger as a result of the large accelerations, and this effectively increases our resolution.

The tracked particles start from γ ≳ 1, and most of them experience a sudden acceleration episode followed by a sequence of second-order Fermi-like acceleration processes (Comisso & Sironi 2018, 2019). Particles trapped in magnetic islands show a Lorentz factor increasing in time (e.g., the red proton in the left panels). Other particles instead gain energy only once and then reach a quasi-steady state, as is typical for particles entering a magnetic island only for a short time and then being bounced in a stochastic manner between different structures.

Relativistic hydrodynamical turbulence naturally provides a landscape of intermittency and large spatial variance because the compressibility is enhanced by relativistic effects (Radice & Rezzolla 2013); in addition, relativistic magnetohydrodynamical turbulence provides the natural conditions to produce extreme-acceleration events and to generate a large population of particles – electrons in particular – with energy distributions that differ significantly from a thermal one (see, e.g., Zhdankin et al. 2017). This is summarized in Fig. 3, which reports the electron energy-distribution functions (spectra) (γ − 1) dN/dγ at t = 2 tA as a function of the Lorentz factor γ − 1, for some representative simulations.
More specifically, the upper panel shows the electron spectra from simulations with σ = 0.3 and for a wide range of values of β; the black dashed line is a Maxwell-Jüttner distribution where the value of the dimensionless electron temperature θe := kB Te/(me c²) = 45 is chosen to reproduce the low-energy part of the spectrum for the case β = 0.11, and is obviously different for each simulation. Note that the high-energy part of the spectra is well approximated by a power law dN/dγ ∝ γ^{−κ+1} (Davelaar et al. 2019; Fromm et al. 2021), whose index κ ≃ 3.2 is quite insensitive to the value of the plasma-β parameter in the range β ≲ 3 × 10⁻³ (see the black dotted line). For very large values of β, however, a single power law does not represent the distribution accurately, and only the very high-energy part of the spectrum maintains an index κ ≃ 3.8.

In the bottom panel of Fig. 3, we instead explore how the electron-energy spectra change when varying σ while keeping β = 0.01. Note that as the magnetization increases, the amount of magnetic energy available for dissipation increases, leading to a systematic shift of the spectra towards progressively larger energies. Furthermore, the high-energy parts of the spectra are well approximated by power laws with indexes κ ≃ 3 − 4, while the highest regions of the spectra terminate with increasingly harder slopes. Overall, and in agreement with several previous works (Comisso & Sironi 2018) – some of which even have different initial conditions (Werner et al. 2018; Ball et al. 2018) – our results clearly indicate that turbulence promotes particle acceleration, producing energy distributions that contain a considerable fraction of very energetic (suprathermal) particles.

Figure 2. Representative quantities in a fully developed 2D turbulence at t = 1.5 tA for a representative simulation with σ = 1 and β = 3 × 10⁻³. The top panels offer a dual view of: the electron number density normalized to the initial value ne/n0 and the magnetization σ [panel (a)], and the temperature ratio T and the total current density Jz [panel (b)]. Also overplotted with different colors are representative particle trajectories, with protons on the left and electrons on the right of each panel (the initial position of each particle is marked with a star). The lower panels [(c) and (d)] report instead the evolution of the Lorentz factor for the same particles marked above.

Given the kinetic behaviour of the plasmas described so far, it is essential to be able to express their properties via analytic fitting functions in terms of the basic plasma parameters, namely β and σ, so that the resulting expressions can then be employed directly in the GRMHD modelling of astrophysical plasmas. A summary of this analytical modelling is presented in Fig. 4, where in the top row we show, as a function of β and σ, respectively, the electron spectral index κ, the nonthermal energy efficiency E, and the temperature ratio T.
Note that the data reported in the first two columns refer to simulations at t = 2 tA, while those in the right column are averaged over the time window 1.7 < t/tA < 2.3 to avoid the oscillations introduced by the stochastic behavior of turbulence. Similarly, the bottom row of Fig. 4 reports one-dimensional cuts of the same quantities, but at fixed values of the magnetization (σ = 0.1 − 3.0), where each circle refers to a distinct simulation of our set. Note that for any fixed value of σ we explored plasma parameters up to the maximum one, βmax ∼ 1/(4σ) (Ball et al. 2018), where our estimates are inevitably less accurate.

Exploiting the large set of simulations performed, we can now construct analytical 2D fits to the various quantities, starting with the electron spectral index κ(β, σ), which can be expressed as

κ(β, σ) = k0 + k1 √σ + k2 σ^{−6/10} tanh(k3 β σ^{1/3}),   (1)

where k0 = 2.8, k1 = 0.2, k2 = 1.6 and k3 = 2.25 (see the top-left panel of Fig. 4). Note that Zhdankin et al. (2017) have proposed a similar but simpler fitting expression which depends on σ only and thus does not account for variations in the plasma β.

Figure 3. Top panel: electron-energy spectra at t = 2 tA for simulations with σ = 0.3 and different values of β; indicated with a dashed line is the Maxwell-Jüttner distribution for β ≃ 0.1, while the dotted line indicates the almost constant spectral index κ ≃ 3.2. Bottom panel: same as above, but for simulations with β = 0.01 and different values of σ.

Overall, the spectral index shows two main features. First, at fixed σ, the spectral index is essentially independent of β for β ≲ 10⁻², but it increases at larger values of β, approaching a very steep tail. Second, at fixed β, the index becomes generally smaller for increasing values of σ.

Next, we quantify the efficiency in the production of particles with nonthermal energies in terms of the weighted average of the excess over a Maxwell-Jüttner distribution (Ball et al. 2018), namely

E := ∫_{γ0}^{∞} [dN/dγ − fMJ(γ, θ)] (γ − 1) dγ / ∫_{γ0}^{∞} (dN/dγ)(γ − 1) dγ,   (2)

where γ0 denotes the peak of the spectrum, fMJ := γ²v/[c θe K2(1/θe)] e^{−γ/θe}, with v the velocity and K2 the modified Bessel function of the second kind. The corresponding 2D fit of the data can then be expressed as

E(β, σ) = e0 + e1 √σ + e2 σ^{1/10} tanh(e3 β σ^{1/10}),   (3)

where e0 = 1.0, e1 = −0.23, e2 = 0.5 and e3 = −10.18 (see the top-middle panel of Fig. 4). Also in this case, the energy efficiency shows three main features. First, for β ≲ 10⁻² the efficiency saturates at a value that is independent of β, but systematically larger for higher values of σ. Second, for high values of β and low values of σ, it approaches E ∼ 0, because the electron spectrum becomes significantly softer. Third, for higher values of σ, the efficiency is the largest, since the spectra widen to larger electron energies. Interestingly, these results are similar to the ones found by Ball et al. (2018) when using different initial conditions.
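To make the definition in Eq. (2) operational, a minimal sketch is given below (not from the paper; it uses NumPy/SciPy, assumes the spectrum dN/dγ is tabulated on a grid `gamma` and normalized consistently with fMJ, and takes the peak of (γ − 1) dN/dγ as γ0, which is how the spectra are plotted in Fig. 3):

import numpy as np
from scipy.special import kn  # modified Bessel function of the second kind

def maxwell_juttner(gamma, theta_e, c=1.0):
    # f_MJ = gamma^2 v / [c theta_e K_2(1/theta_e)] * exp(-gamma/theta_e)
    v = c * np.sqrt(1.0 - 1.0 / gamma**2)
    return gamma**2 * v / (c * theta_e * kn(2, 1.0 / theta_e)) * np.exp(-gamma / theta_e)

def nonthermal_efficiency(gamma, dNdgamma, theta_e):
    # Eq. (2): weighted excess over the Maxwell-Juttner distribution,
    # integrated from the spectral peak gamma_0 upwards.
    i0 = np.argmax((gamma - 1.0) * dNdgamma)   # assumed location of gamma_0
    g, dN = gamma[i0:], dNdgamma[i0:]
    f_mj = maxwell_juttner(g, theta_e)
    num = np.trapz((dN - f_mj) * (g - 1.0), g)
    den = np.trapz(dN * (g - 1.0), g)
    return num / den

Evaluating such a routine for each run would yield the data points to which the fit in Eq. (3) is anchored; θe is obtained beforehand by matching the low-energy part of the spectrum, as done for the dashed line in Fig. 3.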
Finally, we consider what is arguably the most important quantity modelled in our simulations, namely, the dependence of the temperature ratio on the plasma properties. The corresponding 2D fit is given by

T(β, σ) = t0 + t1 σ^{τ1} tanh(t2 β σ^{τ2}) + t2 σ^{τ3} tanh(t3 β^{τ4} σ),   (4)

where t0 = 0.4, t1 = 0.25, t2 = 5.75, t3 = 0.037, and τ1 = −0.5, τ2 = 0.95, τ3 = −0.3, τ4 = −0.05 (see the top-right panel of Fig. 4). Overall, it is easy to see that for low magnetizations, i.e., σ ∈ [0.1, 0.3], and small values of the β parameter, i.e., β ≲ 0.01, the temperature ratio is essentially constant and then starts to grow to values as large as T ≃ 1 for β ≲ 1.0. On the other hand, for high values of the magnetization, i.e., σ ≃ 3.0, the behavior is quite the opposite: the values of T are higher for lower β and decrease when increasing β. For intermediate values of the magnetization, i.e., σ = 1.0, the behavior is a combination of the two described above, showing a nonmonotonic dependence for β ∈ [0.01, 0.1]. Interestingly, in all cases T ∼ 1.0 for β ≃ 1, independently of the value of σ, thus highlighting that, under these conditions, electrons and protons are fully coupled and have roughly the same temperature. Conversely, for β ≲ 10⁻⁴, the temperature ratio depends on the plasma magnetization, being larger for larger magnetizations, as expected for regimes where electrons can be accelerated to suprathermal energies at reconnection sites. More importantly, expression (4) provides a compact and microphysically consistent description of the electron temperatures that can be employed in modern GRMHD codes of accretion flows onto black holes.

Figure 4. Top panels: from left to right are reported, as a function of β and σ, the electron spectral index κ, the energy efficiency E, and the temperature ratio T, respectively [see Eqs. (1)–(4)]. Bottom panels: same as above, but at fixed values of the magnetization (σ = 0.1 − 3.0); each circle refers to a distinct simulation.

We conclude the discussion of our results by returning to the behaviour of the electron spectral index κ. As shown in the top-left panel of Fig. 4 and summarized in Eq. (1), electron acceleration is stronger in low-β and high-σ turbulent plasmas. As suggested already by Drake et al. (2009), this behaviour may be due to the interaction of the electron orbits with small-sized current sheets; such a mechanism can then extract particles from the thermal population and bring them to very high energies via primary and secondary Fermi-like mechanisms (Pecora et al. 2018; Comisso & Sironi 2018). In fully developed GRMHD turbulence, accelerating islands and current sheets are present on all scales, and these could therefore provide the natural site for the accelerating mechanism.

In this simple picture, it is natural to expect that the larger the spectrum of fluctuations at small scales, the more efficient the accelerating mechanism (Haynes et al. 2014).
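Before turning to the spectral check of this picture, it may be convenient to collect Eqs. (1), (3) and (4) in executable form. The sketch below is only a direct transcription (not from the paper or any released code; function names are illustrative), with the coefficients copied from the text above:

import numpy as np

def kappa_fit(beta, sigma):
    # Eq. (1): electron spectral index
    k0, k1, k2, k3 = 2.8, 0.2, 1.6, 2.25
    return k0 + k1 * np.sqrt(sigma) + k2 * sigma**(-0.6) * np.tanh(k3 * beta * sigma**(1.0 / 3.0))

def efficiency_fit(beta, sigma):
    # Eq. (3): nonthermal energy efficiency
    e0, e1, e2, e3 = 1.0, -0.23, 0.5, -10.18
    return e0 + e1 * np.sqrt(sigma) + e2 * sigma**0.1 * np.tanh(e3 * beta * sigma**0.1)

def temperature_ratio_fit(beta, sigma):
    # Eq. (4): electron-to-proton temperature ratio T = Te/Tp
    t0, t1, t2, t3 = 0.4, 0.25, 5.75, 0.037
    tau1, tau2, tau3, tau4 = -0.5, 0.95, -0.3, -0.05
    return (t0
            + t1 * sigma**tau1 * np.tanh(t2 * beta * sigma**tau2)
            + t2 * sigma**tau3 * np.tanh(t3 * beta**tau4 * sigma))

# Illustrative use: electron temperature prescription in a GRMHD cell with
# locally measured beta and sigma (the numbers below are arbitrary examples).
T_ratio = temperature_ratio_fit(beta=3.0e-3, sigma=1.0)

Since the fits are calibrated only for σ ∈ [0.1, 3.0] and β up to βmax ∼ 1/(4σ), any application outside this range amounts to an extrapolation and should be treated with care.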
To validate whether this small-scale picture applies also to trans-relativistic plasmas, we have computed the (not normalized) isotropic power spectrum of the magnetic field for three representative simulations and report them in Fig. 5 as a function of the dimensionless wavenumber k de [the inset shows with colored squares the location in the (σ, β) plane of the three configurations, while the arrows mark the wavevectors associated with the proton-skin depth (k dp = 1) and with the proton Larmor radius (k ρp = 1)], over a downsampled grid of (1024)² (see the Appendix for a discussion). In essence, after assuming the turbulence to be isotropic and homogeneous, we integrate the 2D Fourier transforms B̃i over concentric shells (in this sense, the power spectrum is isotropic) to obtain one-dimensional spectra, whose sum we plot in Fig. 5 [note that the growth of the power spectrum at large wavenumbers is a typical noise effect of PIC simulations due to the finite number of particles (see, e.g., Karimabadi et al. 2013)].

Figure 5. Magnetic-field power spectra for three simulations sampling important locations in the (β, σ) space of parameters. Each simulation is marked with a different color and the corresponding location is shown in the inset, which also reports the electron spectral index. Black dashed lines indicate the turbulent power laws, while the circles delimit the boundaries of each turbulent range, which we define as the limits of the power-law scaling; the arrows mark the wavevectors associated with the proton-skin depth (k dp = 1) and with the proton Larmor radius (k ρp = 1), which is outside the horizontal scale for the red line.

In general, Fig. 5 reveals a number of interesting features when moving in the parameter space from (low-β, high-σ) to (high-β, low-σ). First, the power spectrum is clearly higher in the case of the low-β, high-σ simulation, confirming a more efficient cascade process (Franci et al. 2016). Second, the spectrum is shallower in the sub-ion inertial range (Sahraoui et al. 2009), indicating a more developed turbulence. Finally, and more interestingly, the turbulent cascades terminate at much smaller scales for (low-β, high-σ) simulations, suggesting the existence of thinner current sheets at subproton scales that accelerate particles more efficiently (Pecora et al. 2018).

4. DISCUSSION AND CONCLUSIONS

With the goal of gaining a deeper understanding of the properties of plasmas near astrophysical compact objects, we have employed the PIC Zeltron code to carry out a large campaign of two-dimensional simulations of special-relativistic, decaying plasma turbulence in the trans-relativistic regime. Particularly important in our analysis is the use of the physical mass ratio between electrons and protons and the exploration of a wide range of values in the plasma-β parameter (β = 10⁻⁴ − 1.5) and in the magnetization σ (σ = 0.1 − 3.0). Having simulated such a large portion of the space of parameters encountered in astrophysical plasmas has allowed us to derive analytical fitting functions for the behaviour of a number of important plasma quantities as a function of β and σ.
More specifically, we have presented 2D fitting functions for the electron spectral index κ(β, σ), for the efficiency in generating nonthermal particles E(β, σ), and for the ratio between the electron and proton temperatures T(β, σ). These expressions provide compact and reasonably accurate descriptions of the behaviour of these microphysical plasma properties and can be employed in a number of scenarios involving compact objects and described by macrophysical plasma characteristics. Importantly, since they have been derived from first-principle calculations, they represent a considerable improvement over the rather crude and purely empirical expressions employed at the moment in GRMHD simulations. Finally, we have confirmed the suggestion that plasmas with low β and large σ naturally lead to broad turbulent scenarios and are the most efficient in extracting particles from the thermal population and accelerating them (Pecora et al. 2018; Comisso & Sironi 2018).

As these simulations represent one of the most systematic PIC explorations of trans-relativistic turbulence, they can be employed in a wide range of astrophysical systems, such as jets and accretion disks around supermassive black holes, and, of course, in their imaging (see, e.g., Event Horizon Telescope Collaboration et al. 2019a, 2022a). The formulas provided in this work can be improved by extending the present two-dimensional treatment to three dimensions and thus assessing the role played by dimensionality in studies of this type.

ACKNOWLEDGMENTS

We thank the Referee for the useful comments that have improved our presentation. This research is supported by the ERC Advanced Grant "JETSET: Launching, propagation and emission of relativistic jets from binary mergers and across mass scales" (Grant No. 884631), by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) through the CRC-TR 211 "Strong-interaction matter under extreme conditions" (project number 315477589), and by the State of Hesse within the Research Cluster ELEMENTS (Project ID 500/10.006). LR acknowledges the Walter Greiner Gesellschaft zur Förderung der physikalischen Grundlagenforschung e.V. through the Carl W. Fueck Laureatus Chair. The simulations were performed on HPE Apollo HAWK at the High Performance Computing Center Stuttgart (HLRS) under the grant BNSMIC.

APPENDIX

In what follows, we provide additional information on our analysis, concentrating on three specific aspects: a detailed summary of the properties of the simulations carried out in the campaign, the evidence that stationarity is reached when extracting the spectral information, and a comparison of simulations with different resolutions.

SUMMARY OF SIMULATIONS

Our systematic investigation of the (β, σ) space of parameters consists of 35 large-scale, high-resolution simulations whose main properties are reported in Table 1. All these simulations were performed in two spatial dimensions with the real electron-to-proton mass ratio, a physical box size of L ∼ 5461 de (where de, we recall, is the electron-skin depth) in each of the two spatial directions, and the same electron-to-proton initial temperature, i.e., T0 = 1. In addition, we have performed six simulations with varying properties with respect to the main ones, reported in Table 2.
Table 1. Summary of the physical parameters of our main simulations, which are all performed with the real electron-to-proton mass ratio, equal electron and proton initial temperatures, a resolution of three cells per electron-skin depth (de/dx = 3), and a box of size ∼ 5461 de in both directions. The columns report: the number of the Run, the magnetization σ, the plasma β, the dimensionless temperatures θp,e for protons and electrons, respectively, and the Debye length λD in units of de. In all our simulations we have initialized each computational cell with 10 particles (5 protons and 5 electrons).

Run   σ       β       θp      θe      λD
1     1.0e-1  1.0e-4  5.0e-6  9.2e-3  9.6e-2
2     1.0e-1  3.0e-4  1.5e-5  2.7e-2  1.6e-1
3     1.0e-1  1.0e-3  5.0e-5  9.2e-2  3.0e-1
4     1.0e-1  3.0e-3  1.5e-4  2.7e-1  5.2e-1
5     1.0e-1  1.0e-2  5.0e-4  9.2e-1  9.6e-1
6     1.0e-1  2.0e-2  1.0e-3  1.8e0   1.3e0
7     1.0e-1  1.0e-1  5.0e-3  9.2e0   3.0e0
8     1.0e-1  3.0e-1  2.0e-2  3.7e1   6.1e0
9     1.0e-1  7.0e-1  4.5e-2  8.3e1   9.2e0
10    1.0e-1  1.0e0   6.8e-2  1.2e2   1.1e1
11    1.0e-1  1.5e0   1.0e-1  1.8e2   1.3e1
12    3.0e-1  1.0e-4  1.5e-5  2.7e-2  1.6e-1
13    3.0e-1  3.0e-4  5.0e-5  9.2e-2  3.0e-1
14    3.0e-1  1.0e-3  1.5e-4  2.7e-1  5.2e-1
15    3.0e-1  3.0e-3  5.0e-4  9.2e-1  9.6e-1
16    3.0e-1  1.0e-2  1.5e-3  2.7e0   1.6e0
17    3.0e-1  3.0e-2  5.0e-3  9.2e0   3.0e0
18    3.0e-1  1.1e-1  2.0e-2  3.7e1   6.1e0
19    3.0e-1  3.4e-1  8.0e-2  1.5e2   1.2e1
20    3.0e-1  5.5e-1  2.0e-1  3.7e2   1.9e1
21    1.0e0   1.0e-4  5.0e-5  9.2e-2  3.0e-1
22    1.0e0   3.0e-4  1.5e-4  2.7e-1  5.2e-1
23    1.0e0   1.0e-3  5.0e-4  9.2e-1  9.6e-1
24    1.0e0   3.0e-3  1.5e-3  2.7e0   1.6e0
25    1.0e0   1.0e-2  5.0e-3  9.2e0   3.0e0
26    1.0e0   3.0e-2  1.5e-2  2.7e1   5.2e0
27    1.0e0   1.0e-1  5.0e-2  9.2e1   9.6e0
28    1.0e0   1.6e-1  2.0e-1  3.7e2   1.9e1
29    3.0e0   1.0e-4  1.5e-4  2.7e-1  5.2e-1
30    3.0e0   3.0e-4  5.0e-4  9.2e-1  9.6e-1
31    3.0e0   1.0e-3  1.5e-3  2.7e0   1.6e0
32    3.0e0   3.0e-3  5.0e-3  9.2e0   3.0e0
33    3.0e0   1.0e-2  1.5e-2  2.7e1   5.2e0
34    3.0e0   2.6e-2  5.0e-2  9.2e1   9.6e0
35    3.0e0   5.5e-2  2.0e-1  3.7e2   1.9e1

Table 2. Table of simulations in which we varied different parameters. Runs A1-A3 have different initial T = Tp/Te (and hence different βe and θe), while all other parameters (σ, βp, θp, de/dx, L/de) are the same. Runs B1-B3 have different values of the electron-skin depth per dx and use a smaller physical box of 2730 de. The rows report: the name of the Run, the magnetization σ, the proton and electron plasma β, the proton and electron dimensionless temperatures θp,e, the initial temperature ratio T0, the number of cells per electron-skin depth (de/dx), and the physical box size in terms of the electron-skin depth.

Run     A1       A2       A3       B1       B2       B3
σ       3.0e-1   3.0e-1   3.0e-1   3.0e-1   3.0e-1   3.0e-1
βp      3.0e-4   3.0e-4   3.0e-4   3.0e-4   3.0e-4   3.0e-4
βe      3.0e-2   3.0e-3   3.0e-5   3.0e-4   3.0e-4   3.0e-4
θp      5.0e-5   5.0e-5   5.0e-5   5.0e-5   5.0e-5   5.0e-5
θe      9.18e0   9.18e-1  9.18e-3  9.18e-2  9.18e-2  9.18e-2
T0      1.0e-2   1.0e-1   1.0e+1   1.0e0    1.0e0    1.0e0
de/dx   3.0e0    3.0e0    3.0e0    3.0e0    6.0e0    1.2e+1
L/de    5.46e+3  5.46e+3  5.46e+3  2.73e+3  2.73e+3  2.73e+3

As a first test, to show that our final configuration is independent of the initial electron-to-proton temperature, we have varied T0 in the range [0.001 − 10.0] (see Runs A1-A3 in Table 2 and Fig. 1). Note that for these configurations the plasma β is different for electrons and protons.
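Because the main runs in Table 1 are initialized with equal electron and proton temperatures, the tabulated dimensionless temperatures θe and θp should differ by the proton-to-electron mass ratio. A two-line check on Run 13 illustrates this (the values are taken from Table 1; the relation θα = kB Tα/mα c² is our reading of the definitions and is not spelled out at this point of the text):

```python
theta_p, theta_e = 5.0e-5, 9.2e-2   # Run 13: sigma = 0.3, beta = 3e-4
print(theta_e / theta_p)            # ~1840, i.e. consistent with m_p/m_e ~ 1836
```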
Next, we checked that our results are insensitive to the choice of different (higher) resolutions in terms of de/dx, increasing the resolution up to de/dx = 12 (see Runs B1-B3 in Table 2). In the latter case, we have used a physical box of L/de = 2730 in both directions and varied the number of mesh points from (8192)² up to (32768)². In this last high-resolution configuration, we have followed the dynamics of ∼ 1.1 × 10¹¹ particles.

STATIONARITY OF SPECTRA
Next, we provide evidence that the computed electron-energy spectra reach a steady state after t/tA ≳ 1.8 − 2.0, so that the extraction of the spectral index κ and of the efficiency E is both accurate and robust. Figure 6 shows four representative simulations having different values of σ (see Runs 7, 18, 27, and 31 in Table 1). In each case, we plot the electron-energy spectra at different times during the evolution, as indicated by the colormap on the right of each of the four panels. Furthermore, marked with black vertical lines of various types are three different values of the Lorentz factor γ − 1, and the corresponding evolutions are shown in the bottom panels for each of the four simulations considered. Clearly, all cases show that by t/tA ∼ 2.0 the simulations have reached stationarity, with relative time variations that are ≲ 1.5%, so that κ and E can be extracted reliably.

Figure 6. Four representative simulations in which we show the stationarity of the electron-energy spectra (see Runs 7, 18, 27, and 31 in Table 1). For each simulation, we report the spectra at different times during the evolution, as indicated by the colormap on the right of each of the four panels. Marked with black vertical lines of various types are three different values of the Lorentz factor γ − 1, and the corresponding evolutions are shown in the bottom panels for each of the four simulations considered. Clearly, all cases show that by t/tA ∼ 2.0 the simulations have reached stationarity.

RESOLUTION TESTS
Finally, we have verified that our results are insensitive to the choice of spatial resolution. In particular, we have performed three simulations using an increasing number of cells per electron-skin depth, from de/dx = 3 up to de/dx = 12 (see Runs B1-B3 in Table 2). Figure 7 compares the electron-energy spectra for a case with σ = 0.3 and β = 3 × 10⁻⁴ when varying the number of cells per electron-skin depth, i.e., de/dx = 3 − 12. Clearly, the main features of the electron-energy spectra, and in particular the slope, are very similar for the three different resolutions. Indeed, the relative differences between the three spectra are ≲ 6.0% and thus even smaller than the variations due to the stochastic nature of turbulence, which can cause variations in κ up to ∼ 10.0% (Ball et al. 2018).

Figure 7. Electron-energy spectra with σ = 0.3 and β = 3 × 10⁻⁴ for three different resolutions, de/dx = 3, 6 and 12, using a physical box size of L/de = 2730. The spectra are computed at t/tA = 2.0 and are clearly nearly insensitive to the increased resolution.
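The stationarity requirement and the extraction of the spectral index described above can be illustrated with a short, self-contained sketch. The arrays below are synthetic stand-ins for the measured electron spectra at two late output times; the power-law convention dN/dγ ∝ (γ − 1)^(−κ), the fitting window, and the 1% modulation are assumptions made for this illustration (the efficiency E would be obtained analogously from the nonthermal tail, following the definition given earlier in the paper).

```python
import numpy as np

# synthetic spectra at t/tA = 1.8 and 2.0 on a logarithmic grid of gamma - 1
gm1 = np.logspace(0, 4, 200)
dn_t1 = gm1**(-2.75) * np.exp(-gm1 / 5.0e3)
dn_t2 = dn_t1 * (1.0 + 0.01 * np.sin(np.log(gm1)))   # ~1% change, i.e. "stationary"

# 1) stationarity check: maximum relative variation between the two outputs
rel_var = np.max(np.abs(dn_t2 - dn_t1) / dn_t1)
print(f"max relative variation: {rel_var:.2%}")       # at the percent level

# 2) spectral index: log-log least-squares slope over the power-law window
window = (gm1 > 1.0e1) & (gm1 < 1.0e3)
slope, _ = np.polyfit(np.log10(gm1[window]), np.log10(dn_t2[window]), 1)
print(f"fitted kappa ~ {-slope:.2f}")                 # ~2.8 for this synthetic spectrum
```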
In Figure 8 we show the joint PDFs of the temperature ratio T and of the plasma βtot = βe + βp for the same runs. In the inset we report a zoom-in of the central region of the PDFs at the final time of t = 2 tA. The color-coded contours report the 90%-value for each distribution, while the circles represent the maximum of each joint PDF. One can see that for the three different resolutions we obtain similar final distributions, with a variation in T ≲ 5.0%.

Figure 8. Initial and final values at time t = 2 tA of the normalized joint PDFs of the temperature ratio T and of the total plasma β (see Figure 1), using three different resolutions, namely de/dx = 3, 6, and 12. The inset shows the 90% contour lines of the joint PDFs, while the circles mark the maxima of each distribution. Note that all PDFs converge to the same final area in the (T, βtot) plane.

As a concluding remark, we note that the power spectrum in Fig. 5 has been computed on a down-sampled grid of (1024)² points and not on the full-resolution data of (16384)² points. This coarse-graining operation is routinely done in such expensive simulations, and for two distinct reasons. First, the large particle noise due to the high temperatures reached essentially blurs out the smallest scales, so that using the full resolution does not really provide any additional information. Second, the downsampling allows us to reduce by a factor of 16² ∼ 250 the space needed for the output (we recall that we save data for 38 fields at very high cadence). As a result, while the simulation maximum wavenumber is kmax de = 9.4 and is not shown in the spectrum in Fig. 5, the maximum wavenumber in the downsampled spectrum is kmax de = 0.6 and is well captured.

REFERENCES
Abarca, D., Kluźniak, W., & Sądowski, A. 2018, MNRAS, 479, 3936, doi: 10.1093/mnras/sty1602
Anantua, R., Ressler, S., & Quataert, E. 2020, Mon. Not. R. Astron. Soc., 493, 1404, doi: 10.1093/mnras/staa318
Arzamasskiy, L., Kunz, M. W., Chandran, B. D. G., & Quataert, E. 2019, ApJ, 879, 53, doi: 10.3847/1538-4357/ab20cc
Ball, D., Sironi, L., & Özel, F. 2018, Astrophys. J., 862, 80, doi: 10.3847/1538-4357/aac820
Bandyopadhyay, B. 2022, Nature Astronomy, 6, 14, doi: 10.1038/s41550-021-01535-5
Çıkıntoğlu, S., Ekşi, K. Y., & Rezzolla, L. 2022, arXiv e-prints, arXiv:2204.12275. https://arxiv.org/abs/2204.12275
Cerri, S. S., Servidio, S., & Califano, F. 2017, ApJL, 846, L18, doi: 10.3847/2041-8213/aa87b0
Cerutti, B., Philippov, A., Parfrey, K., & Spitkovsky, A. 2015, Mon. Not. R. Astron. Soc., 448, 606, doi: 10.1093/mnras/stv042
Cerutti, B., & Werner, G. 2019, Zeltron: Explicit 3D relativistic electromagnetic Particle-In-Cell code, Astrophysics Source Code Library, record ascl:1911.012. http://ascl.net/1911.012
Chatterjee, K., Markoff, S., Neilsen, J., et al. 2021, Mon. Not. R. Astron. Soc., 507, 5281, doi: 10.1093/mnras/stab2466
Comisso, L., & Sironi, L. 2018, Phys. Rev. Lett., 121, 255101, doi: 10.1103/PhysRevLett.121.255101
—. 2019, ApJ, 886, 122, doi: 10.3847/1538-4357/ab4c33
Cruz-Osorio, A., Fromm, C. M., Mizuno, Y., et al. 2022, Nature Astronomy, 6, 103, doi: 10.1038/s41550-021-01506-w
Das, P., Porth, O., & Watts, A. 2022, arXiv e-prints, arXiv:2204.00249. https://arxiv.org/abs/2204.00249
Davelaar, J., Olivares, H., Porth, O., et al. 2019, Astron. Astrophys., 632, A2, doi: 10.1051/0004-6361/201936150
Del Zanna, L., Tomei, N., Bugli, M., & Bucciantini, N. 2020, in Journal of Physics Conference Series, Vol. 1623, Journal of Physics Conference Series, 012004, doi: 10.1088/1742-6596/1623/1/012004
Dihingia, I. K., Mizuno, Y., Fromm, C. M., & Rezzolla, L. 2022, arXiv e-prints, arXiv:2206.13184. https://arxiv.org/abs/2206.13184
Dong, C., Wang, L., Huang, Y.-M., Comisso, L., & Bhattacharjee, A. 2018, Phys. Rev. Lett., 121, 165101, doi: 10.1103/PhysRevLett.121.165101
Drake, J. F., Cassak, P. A., Shay, M. A., Swisdak, M., & Quataert, E. 2009, Astrophys. J. Lett., 700, L16, doi: 10.1088/0004-637X/700/1/L16
Event Horizon Telescope Collaboration, Akiyama, K., Alberdi, A., et al. 2019a, Astrophys. J. Lett., 875, L5, doi: 10.3847/2041-8213/ab0f43
—. 2019b, Astrophys. J. Lett., 875, L1, doi: 10.3847/2041-8213/ab0ec7
—. 2022a, Astrophys. J. Lett., 930, L16, doi: 10.3847/2041-8213/ac6672
—. 2022b, Astrophys. J. Lett., 930, L12, doi: 10.3847/2041-8213/ac6674
Franci, L., Landi, S., Matteini, L., Verdini, A., & Hellinger, P. 2016, Astrophys. J., 833, 91, doi: 10.3847/1538-4357/833/1/91
Fromm, C. M., Mizuno, Y., Younsi, Z., et al. 2021, Astron. Astrophys., 649, A116, doi: 10.1051/0004-6361/201937335
Fromm, C. M., Cruz-Osorio, A., Mizuno, Y., et al. 2022, A&A, 660, A107, doi: 10.1051/0004-6361/202142295
Haynes, C. T., Burgess, D., & Camporeale, E. 2014, Astrophys. J., 783, 38, doi: 10.1088/0004-637X/783/1/38
Hellinger, P., Matteini, L., Landi, S., et al. 2015, Astrophys. J. Lett., 811, L32, doi: 10.1088/2041-8205/811/2/L32
Howes, G. G. 2010, Mon. Not. R. Astron. Soc., 409, L104, doi: 10.1111/j.1745-3933.2010.00958.x
Huang, Y.-M., & Bhattacharjee, A. 2016, Astrophys. J., 818, 20, doi: 10.3847/0004-637X/818/1/20
Janssen, M., Falcke, H., Kadler, M., et al. 2021, Nature Astronomy, 5, 1017, doi: 10.1038/s41550-021-01417-w
Karimabadi, H., Roytershteyn, V., Wan, M., et al. 2013, Physics of Plasmas, 20, 012303, doi: 10.1063/1.4773205
Kawazura, Y., Barnes, M., & Schekochihin, A. A. 2019, Proceedings of the National Academy of Science, 116, 771, doi: 10.1073/pnas.1812491116
Kawazura, Y., Schekochihin, A. A., Barnes, M., et al. 2020, arXiv e-prints, arXiv:2004.04922. https://arxiv.org/abs/2004.04922
Mizuno, Y., Fromm, C. M., Younsi, Z., et al. 2021, MNRAS, 506, 741, doi: 10.1093/mnras/stab1753
Mościbrodzka, M., Falcke, H., & Shiokawa, H. 2016, Astron. Astrophys., 586, A38, doi: 10.1051/0004-6361/201526630
Nathanail, A., Fromm, C. M., Porth, O., et al. 2020, MNRAS, 495, 1549, doi: 10.1093/mnras/staa1165
Nättilä, J., & Beloborodov, A. M. 2022, PhRvL, 128, 075101, doi: 10.1103/PhysRevLett.128.075101
Parashar, T. N., Matthaeus, W. H., & Shay, M. A. 2018, Astrophys. J. Lett., 864, L21, doi: 10.3847/2041-8213/aadb8b
Parfrey, K., & Tchekhovskoy, A. 2017, Astrophys. J. Lett., 851, L34, doi: 10.3847/2041-8213/aa9c85
Pecora, F., Pucci, F., Lapenta, G., Burgess, D., & Servidio, S. 2019, SoPh, 294, 114, doi: 10.1007/s11207-019-1507-6
Pecora, F., Servidio, S., Greco, A., et al.
2018, Journal of Plasma Physics, 84, 725840601, doi: 10.1017/S0022377818000995
Pezzi, O., Pecora, F., Le Roux, J., et al. 2021, Space Sci. Rev., 217, 39, doi: 10.1007/s11214-021-00799-7
Porth, O., Chatterjee, K., Narayan, R., et al. 2019, Astrophys. J. Supp., 243, 26, doi: 10.3847/1538-4365/ab29fd
Qian, Q., Fendt, C., & Vourellis, C. 2018, Astrophys. J., 859, 28, doi: 10.3847/1538-4357/aabd36
Radice, D., & Rezzolla, L. 2013, Astrophys. J., 766, L10, doi: 10.1088/2041-8205/766/1/L10
Rezzolla, L., & Zanotti, O. 2013, Relativistic Hydrodynamics (Oxford, UK: Oxford University Press), doi: 10.1093/acprof:oso/9780198528906.001.0001
Ripperda, B., Liska, M., Chatterjee, K., et al. 2022, Astrophys. J. Lett., 924, L32, doi: 10.3847/2041-8213/ac46a1
Ripperda, B., Bacchini, F., Porth, O., et al. 2019, Astrophys. J. Supp., 244, 10, doi: 10.3847/1538-4365/ab3922
Rowan, M. E., Sironi, L., & Narayan, R. 2017, Astrophys. J., 850, 29, doi: 10.3847/1538-4357/aa9380
Sahraoui, F., Goldstein, M. L., Robert, P., & Khotyaintsev, Y. V. 2009, PhRvL, 102, 231102, doi: 10.1103/PhysRevLett.102.231102
Servidio, S., Matthaeus, W. H., Shay, M. A., Cassak, P. A., & Dmitruk, P. 2009, Phys. Rev. Lett., 102, 115003, doi: 10.1103/PhysRevLett.102.115003
Servidio, S., Valentini, F., Califano, F., & Veltri, P. 2012, Phys. Rev. Lett., 108, 045001, doi: 10.1103/PhysRevLett.108.045001
Tchekhovskoy, A., & McKinney, J. C. 2012, Mon. Not. R. Astron. Soc., 423, L55, doi: 10.1111/j.1745-3933.2012.01256.x
Tu, C. Y., & Marsch, E. 1997, Solar Physics, 171, 363, doi: 10.1023/A:1004968327196
Valentini, F., Servidio, S., Perrone, D., et al. 2014, Physics of Plasmas, 21, 082307, doi: 10.1063/1.4893301
van der Holst, B., Manchester, W. B., I., Frazin, R. A., et al. 2010, Astrophys. J., 725, 1373, doi: 10.1088/0004-637X/725/1/1373
Werner, G. R., Uzdensky, D. A., Begelman, M. C., Cerutti, B., & Nalewajko, K. 2018, MNRAS, 473, 4840, doi: 10.1093/mnras/stx2530
Younsi, Z., Porth, O., Mizuno, Y., Fromm, C. M., & Olivares, H. 2020, in Perseus in Sicily: From Black Hole to Cluster Outskirts, ed. K. Asada, E. de Gouveia Dal Pino, M. Giroletti, H. Nagai, & R. Nemmen, Vol. 342, 9–12, doi: 10.1017/S1743921318007263
Zhdankin, V. 2021, ApJ, 922, 172, doi: 10.3847/1538-4357/ac222e
Zhdankin, V., Uzdensky, D. A., Werner, G. R., & Begelman, M. C. 2019, Phys. Rev. Lett., 122, 055101
Zhdankin, V., Werner, G. R., Uzdensky, D. A., & Begelman, M. C.
2017, PhRvL, 118, 055103, doi: 10.1103/PhysRevLett.118.055103

diff --git a/FtE0T4oBgHgl3EQfzQIy/content/tmp_files/load_file.txt b/FtE0T4oBgHgl3EQfzQIy/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bbf12a41f68251e62b30b0036200feb9e1035e40
--- /dev/null
+++ b/FtE0T4oBgHgl3EQfzQIy/content/tmp_files/load_file.txt
@@ -0,0 +1,1327 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf,len=1326

DRAFT VERSION JANUARY 10, 2023
Typeset using LATEX twocolumn style in AASTeX63
arXiv:2301.02669v1 [astro-ph.HE] 6 Jan 2023

Microphysical plasma relations from kinetic modelling of special-relativistic turbulence
CLAUDIO MERINGOLO,1,2 ALEJANDRO CRUZ-OSORIO,1 LUCIANO REZZOLLA,1,3,4 AND SERGIO SERVIDIO2
1 Institut für Theoretische Physik, Goethe Universität, Frankfurt, Germany
2 Dipartimento di Fisica, Università della Calabria, I-87036 Cosenza, Italy
3 Frankfurt Institute for Advanced Studies, Frankfurt, Germany
4 School of Mathematics, Trinity College, Dublin, Ireland
Corresponding authors: Claudio Meringolo, Alejandro Cruz-Osorio (claudiomeringolo@unical.it, osorio@itp.uni-frankfurt.de)

ABSTRACT
The microphysical, kinetic properties of astrophysical plasmas near accreting compact objects are still poorly understood. For instance, in modern general-relativistic magnetohydrodynamic simulations, the relation between the temperature of electrons Te and protons Tp is prescribed in terms of simplified phenomenological models where the electron temperature is related to the proton temperature in terms of the ratio between the gas and magnetic pressures, or β parameter. We here present a very comprehensive campaign of two-dimensional kinetic Particle-In-Cell (PIC) simulations of special-relativistic turbulence to investigate systematically the microphysical properties of the plasma in the trans-relativistic regime. Using a realistic mass ratio between electrons and protons, we analyze how the index of the electron energy distributions κ, the efficiency of nonthermal particle production E, and the temperature ratio T := Te/Tp vary over a wide range of values of β and σ. For each of these quantities, we provide two-dimensional fitting functions that describe their behaviour in the relevant space of parameters, thus connecting the microphysical properties of the plasma, κ, E, and T, with the macrophysical ones β and σ. In this way, our results can find application in a wide range of astrophysical scenarios, including the accretion and the jet emission onto supermassive black holes, such as M87* and Sgr A*.

Keywords: accretion onto black holes — jet launching — kinetic turbulence — magnetic reconnection

1. INTRODUCTION
Considerable effort has been dedicated over the last few years to the modelling via general-relativistic simulations of plasma accreting onto supermassive black holes (Nathanail et al. 2020; Del Zanna et al. 2020; Ripperda et al. 2019; Younsi et al. 2020; Dihingia et al. 2022) and neutron stars (Parfrey & Tchekhovskoy 2017; Abarca et al. 2018; Das et al. 2022; Çıkıntoğlu et al. 2022). Among the different approaches considered, surely general-relativistic magnetohydrodynamics (GRMHD) simulations have been the focus of many groups worldwide (see, e.g., Event Horizon Telescope Collaboration et al. 2019a; Porth et al. 2019; Event Horizon Telescope Collaboration et al. 2022a). While essential to make theoretical progress on these scenarios, these GRMHD simulations can only describe the dynamically important part of the fluid, the protons (or "ions" as they are sometimes referred to), leaving completely undetermined the physical properties – such as the energy distribution, the number densities, and the temperatures – of the "lighter" part of the fluid, namely, the electrons. This represents a serious limitation for two different reasons. First, in hot, ionized plasma jets around black holes, the Coulomb coupling between electrons and protons is inefficient, so that protons and electrons are likely to have distinct temperatures, as it happens in the solar wind (Tu & Marsch 1997; van der Holst et al. 2010; Howes 2010; Dihingia et al. 2022). Second, a proper knowledge of the electron energy distribution is essential in order to obtain accurate imaging of supermassive black holes and hence compare with the observations (Davelaar et al. 2019; Mizuno et al. 2021; Cruz-Osorio et al. 2022).

To cope with this problem, a number of phenomenological prescriptions have been suggested in the literature to relate the electron temperature to the simulated proton temperature. In this context, a very commonly employed approach is the so-called R−β model (Mościbrodzka et al. 2016), where the electron temperature is related to the proton temperature in terms of the plasma-β parameter, i.e., the ratio of the thermal to the magnetic pressure, and of two free parameters, Rlow and Rhigh (see also Anantua et al. 2020 for a critical-β model, where two additional parameters are introduced). The R−β approach has been widely used by the Event Horizon Telescope (EHT) Collaboration to reconstruct theoretically the first images of the supermassive black holes M87* (Event Horizon Telescope Collaboration et al. 2019b) and Sgr A* (Event Horizon Telescope Collaboration et al. 2022b). These investigations, in particular, have resorted to a simplified version of the R−β approach in which Rlow = 1, spanning different values of Rhigh (Event Horizon Telescope Collaboration et al. 2019a, 2022a). Taking into account a more realistic description of the plasma parameters using self-consistent kinetic models has shown that finer details of the image can appear, but also that the R−β approach is remarkably robust (Mizuno et al. 2021).

Clearly, it is essential to connect the microphysical properties of the plasma with the macrophysical ones β and σ, where hybrid-kinetic models might have some limitations (Arzamasskiy et al. 2019; Valentini et al. 2014; Cerri et al. 2017). To this scope, we have performed 38 large-scale fully kinetic (i.e., both protons and electrons are treated as particles) Particle-In-Cell (PIC) simulations of special-relativistic plasma in the so-called "trans-relativistic regime", that is, when the plasma magnetization σ – the ratio between the magnetic energy density and the enthalpy density (see below for a definition) – is of order unity (Ripperda et al. 2019; Mizuno et al. 2021; Bandyopadhyay 2022; Janssen et al. 2021), and covering four orders of magnitude in the plasma-β parameter (see Appendix for details on the various simulations). In all simulations, we employ a physical proton-to-electron mass ratio (see Rowan et al. 2017 for the importance of using a realistic mass ratio), and analyze the most important microphysical properties of the turbulent plasma, namely, the spectral index of the electron energy distributions κ, the efficiency in the production of nonthermal particles E, and the temperature ratio T := Te/Tp. The parameter ranges explored here overlap and extend those considered in previous and influential works of astrophysical kinetic turbulence (Zhdankin et al. 2019; Zhdankin 2021; Kawazura et al. 2019, 2020). Exploiting the large coverage of the space of parameters, we are able to model via analytic fitting functions the behaviour of all of these quantities, thus providing a convenient tool to introduce kinetic effects in global GRMHD simulations of accretion onto compact objects and to improve the modelling of radiatively inefficient accretion flows around black holes, such as M87* or Sgr A* (Tchekhovskoy & McKinney 2012; Qian et al. 2018; Porth et al. 2019; Cruz-Osorio et al. 2022; Chatterjee et al. 2021; Ripperda et al. 2022; Event Horizon Telescope Collaboration et al. 2022a).

2. SIMULATION SETUP
To carry out our investigation and to simulate the full development of relativistic turbulence in kinetic plasmas, we use the publicly available Zeltron code (Cerutti et al. 2015; Cerutti & Werner 2019). In particular, we employ a two-dimensional (2D) geometry in Cartesian coordinates retaining, as in any fully consistent plasma model, the three-dimensional components of the magnetic and electric fields, of the current density, and of the pressure tensor. The temperature of each species, α = e, p for electrons and protons, is specified through the plasma-β parameter βα := 8π nα kB Tα / B0², where nα and Tα are the number densities and the temperatures, respectively. Here, B0 = (0, 0, B0) is the magnetic-field vector in the ambient plasma (B0 = const.), and kB is the Boltzmann constant. We initialize all of our simulations with the same number density for electrons and protons in a computational domain that is a Cartesian box of side Lx = Ly = L = 16384 dx, where dx = de/3 is the cell resolution and de := c/ωpe is the electron-skin depth. In the above, c is the speed of light and ωpe := [(4π ne e²)/me]^(1/2) [1 + θe/(Γe − 1)]^(−1/2) is the electron plasma frequency, me the electron mass, Γe the electron adiabatic index, and θe := kB Te/(me c²) the dimensionless electron temperature. We have also carried out three more simulations with a smaller box, L = 2730 de, and resolutions of (8192)², (16384)², and (32768)² points (see Appendix for details). Furthermore, we set up our computational box so that it is periodic in the x- and in the y-directions. Finally, in all our simulations, each computational cell is initialized with 10 particles per cell (i.e., 5 electrons and 5 protons). As a result, during our evolutions we follow the dynamics of ∼ 2.7 × 10⁹ particles per simulation.

We initialize our system as done in typical simulations of plasma turbulence (see, e.g., Servidio et al. 2012). The initial conditions consist of a relativistic plasma perturbed by a 2D spectrum of Fourier modes for the magnetic field. To avoid any compressive activity, neither density perturbations nor parallel variances (i.e., with components out of plane) are imposed at t = 0. In practice, we start from expressing the z-component of the vector potential in Fourier modes as Az(x, y) := Σ_{kx,ky} Ak exp[i(k · x + φk)], where k = (kx, ky) is the wavevector with modulus k = |k| = (2π/L) m (m is the dimensionless wavenumber), and φk are randomly chosen phases. The amplitude of the modes is set as Ak = [1 + (k/k0)^(15/3)]^(−1), such that it is peaked at k0 = (2π/L) m0 with m0 = 4. The spectrum is set to zero at m > 7 in order to construct initial conditions consistent with random large-scale structures. The magnetic-field components Bx and By are then computed by straightforward derivatives. Finally, to explore a regime of strongly perturbed field lines, we fix the amplitude of the fluctuations to be ⟨B⊥⟩/B0 ∼ 1, where ⟨B⊥⟩ is the root-mean-square value of the in-plane fluctuations (a reduced-size sketch of this construction is given below). This choice leads to a broader particle energy distribution, while Nättilä & Beloborodov (2022) showed that when the amplitude is small, the particle energy distribution is quasi-thermal.

Figure 1. Initial and final values at time t = 2 tA of the normalized joint PDFs of the temperature ratio T and of the total β parameter, βtot := βe + βp. The data refer to three representative simulations with initial temperature ratio T0 = 0.1, 1.0 and 10. The inset shows the 90% contour lines of the joint PDFs, while the circles mark the maxima of each PDF. Note that all PDFs converge to the same final area in the (T, βtot) plane despite the very different initial data.

Other quantities that will be referred to in the rest of the paper are the Alfvén crossing time tA := L/vA, where vA := c [σ/(1 + σ)]^(1/2) is the Alfvén speed. The plasma magnetization is instead defined as σ := B0²/(4πw), where w is the enthalpy density of the plasma, w := (ρe + ρp)c² + Γe ϵe + Γp ϵp, with ρe,p and ϵe,p being, respectively, the rest-mass densities and the internal energy densities of electrons and protons when following an ideal-fluid equation of state (Rezzolla & Zanotti 2013).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='0, and 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Clearly, the three initial setups have differ- ent joint PDFs narrowly distributed around the three initial values of the temperature ratio T0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Interestingly, however, at the final time they have all converged to the same equilib- rium distribution, irrespective of the initial data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' This can be best appreciated in the inset, which reports a zoom-in of the central region of the final distributions, with the color-coded contour reporting the 90%-value for each simulation, while the circles represent the maximum of each joint PDF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' This convergence has been verified to take place for four different values of the initial temperature ratio (T0 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='01, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='1, 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='0 and 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='0), while keeping σ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='3 and β = 3 × 10−4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' The behaviour in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' 1 induces us to conjecture that the choice of the initial temperature T0 is effectively unimpor- tant at least in the ranges explored here1 as its memory is lost by the time the system has reached a steady state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' In view of this, we set T0 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='0 for the 35 simulations per- formed varying σ and β (note that with such initial temper- ature ratio, the plasma-β parameter is the same for electrons and protons, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', βe = βp =: β).' 
The ranges of σ and β explored are compatible with previous kinetic studies, state-of-the-art GRMHD simulations, and radiative-transfer calculations (Ball et al. 2018; Cruz-Osorio et al. 2022; Fromm et al. 2022). As also noted by Pecora et al. (2019), higher values of β would require a much higher number of particles to counter the statistical noise, making purely PIC calculations of this type computationally expensive with modern resources.

Figure 2 provides a very compact but powerful overview of the fully developed turbulent state for a simulation with σ = 1.0 and β = 3 × 10−3, at time t = 1.5 tA. Each upper panel is split into two regions reporting different plasma properties. Panel (a) shows the electron number density ne normalized to the initial number of particles per cell n0 (left), and the magnetization σ (right). Panel (b), instead, reports the temperature ratio T (left) and the out-of-plane electric-current density Jz (right).

¹ A word of caution: we have shown the initial temperature to be irrelevant once turbulence is developed for a specific set of initial values of β and σ. Given the physical arguments given above, extending this conclusion to different initial values is a conjecture that is reasonable but challenging to prove, especially for β ∼ 1.
Note how, in analogy to nonrelativistic kinetic simulations, vortex-like and sheet-like structures corresponding to magnetic flux tubes are present at all the scales that are resolved in the simulation (Servidio et al. 2012; Comisso & Sironi 2018; Parashar et al. 2018; Pecora et al. 2019). High-number-density "magnetic islands" can be found in large-scale flux tubes, and in general, the density is larger in these coherent quasi-circular structures. At the same time, the largest temperatures (ratios) are not achieved at the center of the islands, which are instead comparatively cooler. This is because the temperature is higher between flux tubes, where reconnection layers lead to the formation of plasmoids within narrow current sheets (Servidio et al. 2009; Comisso & Sironi 2018; Pezzi et al. 2021). Elongated unstable current sheets tend to fragment into chains of plasmoids, and small-size current sheets appear on a wide range of scales (Hellinger et al. 2015; Dong et al. 2018; Huang & Bhattacharjee 2016).
Notice also that the out-of-plane electric-current density Jz shows a variety of current sheets of different sizes. Some of these current layers break into smaller plasmoids, and these regions are important for the heating of the plasma and the acceleration of the particles. The various quantities shown in Fig. 2 are overlaid with the trajectories of some of the most energized particles that we tracked (protons in the left panels and electrons in the right ones). In particular, we track a sample of 500 electrons and 500 protons during the whole simulation, both randomly chosen. The starting position of each particle is marked with a star. Note how, quite generically, and in addition to the basic gyrations at the corresponding Larmor radii, there are particles that have closed orbits as they are trapped in a flux rope, while others experience turnovers that suddenly bend the trajectory, similarly to what is observed in nonrelativistic turbulence simulations (Pecora et al. 2018)². Overall, when a particle experiences a reconnection process and is accelerated, it abruptly increases its Larmor radius, but also its Lorentz factor γ and kinetic energy. In the lower panels (c) and (d) of Fig. 2 we show the evolution of the Lorentz factor of the particles tracked in the upper panels (a) and (b), with protons being reported in panel (c) and electrons in panel (d). As expected, and as shown by the different vertical scales of panels (c) and (d), electrons experience considerably larger accelerations when compared to protons.
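For reference, Lorentz-factor histories such as those in panels (c) and (d) can be reconstructed from the tracked momenta alone via γ = sqrt(1 + |p|²/(m c)²); the short sketch below is only an illustration of that relation, and the output layout and code units are assumptions rather than the actual Zeltron tracking format.

import numpy as np

def lorentz_factor(px, py, pz, mass, c=1.0):
    """gamma = sqrt(1 + |p|^2 / (m c)^2) for arrays of tracked momenta.

    px, py, pz have shape (n_particles, n_times); mass and c are in the
    same (assumed) code units as the momenta.
    """
    p2 = px**2 + py**2 + pz**2
    return np.sqrt(1.0 + p2 / (mass * c)**2)

# Hypothetical tracked data: 500 particles, 200 output times.
rng = np.random.default_rng(1)
px, py, pz = (rng.normal(0.0, 3.0, (500, 200)) for _ in range(3))
gamma_e = lorentz_factor(px, py, pz, mass=1.0)        # electrons (me = 1 in code units)
gamma_p = lorentz_factor(px, py, pz, mass=1836.15)    # protons, physical mass ratio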
This is simply due to the different masses of the two species: electrons, which have a smaller Larmor radius, are more efficiently accelerated by the thin current sheets where magnetic reconnection takes place. This stochastic acceleration mechanism of multi-reconnection events is very efficient and commonly observed in astrophysical plasma turbulence (Drake et al. 2009; Haynes et al. 2014). The tracked particles start from γ ≳ 1, and most of them experience a sudden acceleration episode, and then a sequence of second-order Fermi-like processes of acceleration (Comisso & Sironi 2018, 2019). Particles trapped in magnetic islands show a Lorentz factor increasing in time (e.g., the red proton in the left panels). Other particles, instead, gain energy only once and then reach a quasi-steady state, as is typical for particles entering the magnetic island only for a short time and then being bounced in a stochastic manner between different structures.

² When the turbulence is fully developed, the velocity distribution of the electrons is highly nonthermal and their Larmor radius is significantly larger as a result of the large accelerations, and this effectively increases our resolution.

Relativistic hydrodynamical turbulence naturally provides a landscape of intermittency and large spatial variance because the compressibility is enhanced by relativistic effects (Radice & Rezzolla 2013); in addition, relativistic magnetohydrodynamical turbulence provides the natural conditions to produce extreme-acceleration events and to generate a large population of particles – electrons in particular – with energy distributions that differ significantly from a thermal one (see, e.g., Zhdankin et al. 2017).
This is summarized in Fig. 3, which reports the electron energy-distribution functions (spectra) (γ − 1) dN/dγ at t = 2 tA as a function of the Lorentz factor γ − 1, for some representative simulations. More specifically, the upper panel shows the electron spectra from simulations with σ = 0.3 and for a wide range of values of β; the black dashed line is a Maxwell-Jüttner distribution where the value of the dimensionless electron temperature θe := kB Te/(me c²) = 45 is chosen to reproduce the low-energy part of the spectrum for the case β = 0.11 and is obviously different for each simulation. Note that the high-energy part of the spectra is well approximated by a power law dN/dγ ∝ γ^(−κ+1) (Davelaar et al. 2019; Fromm et al. 2021), whose index κ ≃ 3.2 is quite insensitive to the value of the plasma-β parameter in the range β ≲ 3 × 10−3 (see the black dotted line). For very large values of β, however, a single power law does not represent the distribution accurately, and only the very high-energy part of the spectrum maintains an index κ ≃ 3.8.
In the bottom panel of Fig. 3, we instead explore how the electron-energy spectra change when varying σ while keeping β = 0.01. Note that as the magnetization increases, the amount of magnetic energy available for dissipation increases, leading to a systematic shift of the spectra towards progressively larger energies. Furthermore, the high-energy parts of the spectra are well approximated by power laws with indexes κ ≃ 3 − 4, while the highest regions of the spectra terminate with increasingly harder slopes. Overall, and in agreement with several previous works (Comisso & Sironi 2018) – some of which even have different initial conditions (Werner et al. 2018; Ball et al. 2018) – our results clearly indicate that turbulence promotes the particle acceleration, producing energy distributions that contain a considerable fraction of very energetic (suprathermal) particles.
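A minimal sketch of how such spectra, the power-law index κ, and the Maxwell-Jüttner comparison can be extracted from a list of particle Lorentz factors is given below; the binning, the fitting window, and the synthetic data are assumptions for illustration, not the diagnostics actually used to produce Fig. 3.

import numpy as np
from scipy.special import kv   # modified Bessel function of the second kind

def energy_spectrum(gamma, bins=200):
    """(gamma - 1) dN/dgamma on logarithmic bins of gamma - 1."""
    edges = np.logspace(np.log10(gamma.min() - 1.0 + 1e-8),
                        np.log10(gamma.max() - 1.0), bins)
    counts, edges = np.histogram(gamma - 1.0, bins=edges)
    centers = np.sqrt(edges[1:] * edges[:-1])
    dN_dgamma = counts / np.diff(edges)
    return centers, centers * dN_dgamma

def power_law_index(centers, spectrum, lo, hi):
    """kappa from dN/dgamma ~ gamma^(-kappa+1), fitted on [lo, hi] in gamma - 1."""
    mask = (centers > lo) & (centers < hi) & (spectrum > 0)
    slope, _ = np.polyfit(np.log10(centers[mask]), np.log10(spectrum[mask]), 1)
    # spectrum = (gamma-1) dN/dgamma ~ gamma^(-kappa+2), hence kappa = 2 - slope
    return 2.0 - slope

def maxwell_juttner(gamma, theta_e):
    """f_MJ = gamma^2 v / [c theta_e K2(1/theta_e)] exp(-gamma/theta_e), with c = 1."""
    v = np.sqrt(1.0 - 1.0 / gamma**2)
    return gamma**2 * v / (theta_e * kv(2, 1.0 / theta_e)) * np.exp(-gamma / theta_e)

# Example with synthetic Lorentz factors standing in for the electron data.
rng = np.random.default_rng(2)
gamma = 1.0 + rng.pareto(2.2, 200_000) * 50.0
centers, spec = energy_spectrum(gamma)
kappa = power_law_index(centers, spec, lo=1e2, hi=1e3)
mj = maxwell_juttner(1.0 + centers, theta_e=45.0)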
Figure 2. Representative quantities in a fully developed 2D turbulence at t = 1.5 tA for a representative simulation with σ = 1 and β = 3 × 10−3. The top panels offer a dual view of the electron number density normalized to the initial value ne/n0 and of the magnetization σ [panel (a)], and of the temperature ratio T and of the total current density Jz [panel (b)]. Also overplotted with different colors are representative particle trajectories, with protons on the left and electrons on the right of each panel (the initial position of each particle is marked with a star). The lower panels [(c) and (d)] report instead the evolution of the Lorentz factor for the same particles marked above.
Given the kinetic behaviour of the plasmas described so far, it is essential to be able to express their properties via analytic fitting functions and in terms of the basic parameters of the plasma, namely, β, σ, so that the resulting expressions can then be employed directly in the GRMHD modelling of astrophysical plasma. A summary of this analytical modelling is presented in Fig. 4, where in the top row we show, as a function of β and σ, respectively, the electron spectral index κ, the nonthermal energy efficiency E, and the temperature ratio T. Note that the data reported in the first two columns refers to simulations at t = 2 tA, while that in the right column is averaged over the time window 1.7 < t/tA < 2.3 to avoid the oscillations introduced by the stochastic behavior of turbulence. Similarly, the bottom row of Fig. 4 reports one-dimensional cuts of the same quantities, but at fixed values of the magnetization (σ = 0.1 − 3.0), where each circle refers to a distinct simulation of our set. Note that for any fixed value of σ we explored plasma parameters up to the maximum one βmax ∼ 1/(4σ) (Ball et al. 2018), where our estimates are inevitably less accurate.

Exploiting the large set of simulations performed, we can now construct analytical 2D fits to the various quantities, starting with the electron spectral index κ(β, σ), which can be expressed as

κ(β, σ) = k0 + k1 √σ + k2 σ^(−6/10) tanh(k3 β σ^(1/3)),   (1)

where k0 = 2.8, k1 = 0.2,
k2 = 1.6, and k3 = 2.25 (see the top-left panel of Fig. 4). Note that Zhdankin et al. (2017) have proposed a similar but simpler fitting expression, which depends on σ only and thus does not account for variations in the plasma β. Overall, the spectral index shows two main features. First, at fixed σ, the spectral index is essentially independent of β for β ≲ 10−2, but it increases at larger values of β, approaching a very steep tail. Second, at fixed β, the index becomes generally smaller for increasing values of σ.
Figure 3. Top panel: electron-energy spectra at t = 2 tA for simulations with σ = 0.3 and different values of β (from 1.0 × 10−4 to 1.1 × 10−1); indicated with a dashed line is a Maxwell-Jüttner distribution with θe = 45 for β ≃ 0.1, while the dotted line indicates the almost constant spectral index κ ≃ 3.2. Bottom panel: same as above, but for simulations with β = 0.01 and different values of σ (σ = 0.1, 0.3, 1.0, and 3.0).

Next, we quantify the efficiency in the production of particles with nonthermal energies in terms of the weighted average of the excess over a Maxwell-Jüttner distribution (Ball et al. 2018), namely

E := ∫_γ0^∞ [dN/dγ − fMJ(γ, θ)] (γ − 1) dγ / ∫_γ0^∞ (dN/dγ) (γ − 1) dγ,   (2)

where γ0 denotes the peak of the spectrum and fMJ := γ² v/[c θe K2(1/θe)] e^(−γ/θe), with v the velocity and K2 the modified Bessel function of the second kind. The corresponding 2D fit of the data can then be expressed as

E(β, σ) = e0 + e1 √σ + e2 σ^(1/10) tanh(e3 β σ^(1/10)),   (3)

where e0 = 1.0, e1 = −0.23, e2 = 0.5, and e3 = −10.18
(see the top-middle panel of Fig. 4). Also in this case, the energy efficiency shows three main features. First, for β ≲ 10−2 the efficiency saturates at a value that is independent of β, but systematically larger for higher values of σ. Second, for high values of β and low values of σ, it approaches E ∼ 0, because the electron spectrum becomes significantly softer. Third, for higher values of σ, the efficiency is the largest, since the spectra widen to larger electron energies. Interestingly, these results are similar to the ones found by Ball et al. (2018) when using different initial conditions.

Finally, we consider what is arguably the most important quantity modelled in our simulations, namely, the dependence of the temperature ratio on the plasma properties. The corresponding 2D fit is given by

T(β, σ) = t0 + t1 σ^τ1 tanh[t2 β σ^τ2] + t2 σ^τ3 tanh[t3 β^τ4 σ],   (4)

where t0 = 0.4, t1 = 0.25, t2 = 5.75, t3 = 0.037, and τ1 = −0.5, τ2 = 0.95, τ3 = −0.3, τ4 = −0.05 (see the top-right panel of Fig. 4).
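For convenience, the closed-form fits of Eqs. (1), (3), and (4) can be evaluated directly; the following is a straightforward transcription into Python (numpy) using the coefficients quoted above, intended only as a sketch of how the expressions might be used, e.g., in a GRMHD post-processing step, and not as code from the paper.

import numpy as np

def kappa_fit(beta, sigma, k0=2.8, k1=0.2, k2=1.6, k3=2.25):
    """Electron spectral index, Eq. (1):
    kappa = k0 + k1 sqrt(sigma) + k2 sigma^(-6/10) tanh(k3 beta sigma^(1/3))."""
    return (k0 + k1 * np.sqrt(sigma)
            + k2 * sigma**(-0.6) * np.tanh(k3 * beta * sigma**(1.0 / 3.0)))

def efficiency_fit(beta, sigma, e0=1.0, e1=-0.23, e2=0.5, e3=-10.18):
    """Nonthermal energy efficiency, Eq. (3):
    E = e0 + e1 sqrt(sigma) + e2 sigma^(1/10) tanh(e3 beta sigma^(1/10))."""
    return e0 + e1 * np.sqrt(sigma) + e2 * sigma**0.1 * np.tanh(e3 * beta * sigma**0.1)

def temperature_ratio_fit(beta, sigma,
                          t0=0.4, t1=0.25, t2=5.75, t3=0.037,
                          tau1=-0.5, tau2=0.95, tau3=-0.3, tau4=-0.05):
    """Electron-to-proton temperature ratio, Eq. (4); note that t2 appears both
    inside the first tanh and as the prefactor of the second term, as written."""
    return (t0
            + t1 * sigma**tau1 * np.tanh(t2 * beta * sigma**tau2)
            + t2 * sigma**tau3 * np.tanh(t3 * beta**tau4 * sigma))

# Example: tabulate the fits over the explored parameter ranges.
beta = np.logspace(-4, 0, 50)            # beta = 1e-4 ... 1
sigma = np.array([0.1, 0.3, 1.0, 3.0])   # magnetizations used for the 1D cuts
kappa = kappa_fit(beta[None, :], sigma[:, None])
eff = efficiency_fit(beta[None, :], sigma[:, None])
Tratio = temperature_ratio_fit(beta[None, :], sigma[:, None])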
Overall, it is easy to see that for low magnetizations, i.e., σ ∈ [0.1, 0.3], and small values of the β parameter, i.e., β ≲ 0.01, the temperature ratio is essentially constant and then starts to grow to values as large as T ≃ 1 for β ≲ 1.0. On the other hand, for high values of the magnetization, i.e., σ ≃ 3.0, the behavior is quite the opposite: the values of T are higher for lower β and decrease when increasing β. For intermediate values of the magnetization, i.e., σ = 1.0, the behavior is a combination of the two described above, showing a nonmonotonic dependence for β ∈ [0.01, 0.1]. Interestingly, in all cases, T ∼ 1.0
for β ≃ 1, independently of the value of σ, thus highlighting that, under these conditions, electrons and protons are fully coupled and have roughly the same temperature. Conversely, for β ≲ 10−4, the temperature ratio will depend on the plasma magnetization, being larger for larger magnetizations, as expected for regimes where electrons can be accelerated to suprathermal energies at reconnection sites. More importantly, expression (4) provides a compact and microphysically consistent description of the electron temperatures that can be employed in modern GRMHD codes of accretion flows onto black holes.

We conclude the discussion of our results by returning to the behaviour of the electron spectral index κ. As shown in the top-left panel of Fig. 4 and summarized in Eq. (1), electron acceleration is higher in low-β and high-σ turbulent plasmas. As suggested already by Drake et al. (2009), this behaviour may be due to the interaction of the electron orbits with small-sized current sheets; such a mechanism can then extract particles from the thermal population and bring them to very high energies via primary and secondary Fermi-like mechanisms (Pecora et al. 2018; Comisso & Sironi 2018).
Figure 4. Top panels: from left to right are reported, as a function of β and σ, the electron spectral index κ, the energy efficiency E, and the temperature ratio T, respectively [see Eqs. (1)–(4)]. Bottom panels: same as above, but at fixed values of the magnetization (σ = 0.1 − 3.0); each circle refers to a distinct simulation.
In fully developed GRMHD turbulence, accelerating islands and current sheets are present on all scales, and these could therefore provide the natural site for the accelerating mechanism. In this simple picture, it is natural to expect that the larger the spectrum of fluctuations at small scales, the more efficient the accelerating mechanism (Haynes et al. 2014). To validate whether this applies also to trans-relativistic plasmas, we have computed the (not normalized) isotropic power spectrum of the magnetic field for three representative simulations and reported them in Fig. 5 as a function of the dimensionless kde [the inset shows with colored squares the location in the (σ, β) plane of the three configurations, while the arrows mark the wavevectors associated to the proton-skin depth (kdp = 1) and to the proton Larmor radius (kρp = 1)] and over a downsampled grid of (1024)² (see the Appendix for a discussion). In essence, after assuming the turbulence to be isotropic and homogeneous, we integrate the 2D Fourier transforms B̃i over concentric shells (in this sense, the power spectrum is isotropic) to obtain one-dimensional spectra, whose sum we plot in Fig. 5 [note that the growth of the power spectrum at large wavenumbers is a typical noise effect of PIC simulations due to a finite number of particles (see, e.g., Karimabadi et al. 2013)].
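As an illustration of the shell-integration step, a minimal numpy sketch is given below; the grid size, field names, and normalization are assumptions, and no windowing or dealiasing is applied, unlike what a production analysis would likely include.

import numpy as np

def isotropic_power_spectrum(Bx, By, Bz, dx=1.0):
    """Isotropic 1D magnetic power spectrum from 2D fields.

    Sums |FFT(B_i)|^2 over the three components and integrates over
    concentric shells of constant |k|, assuming isotropy and homogeneity.
    """
    ny, nx = Bx.shape
    kx = 2.0 * np.pi * np.fft.fftfreq(nx, d=dx)
    ky = 2.0 * np.pi * np.fft.fftfreq(ny, d=dx)
    kmag = np.sqrt(kx[None, :]**2 + ky[:, None]**2)

    power = sum(np.abs(np.fft.fft2(B))**2 for B in (Bx, By, Bz))

    dk = kx[1] - kx[0]                      # shell width = smallest wavenumber
    edges = np.arange(0.5 * dk, kmag.max() + dk, dk)
    which = np.digitize(kmag.ravel(), edges)
    spectrum = np.bincount(which, weights=power.ravel(),
                           minlength=edges.size + 1)[1:edges.size]
    k_centers = 0.5 * (edges[:-1] + edges[1:])[:spectrum.size]
    return k_centers, spectrum

# Example on a synthetic, downsampled 1024^2 grid standing in for the PIC output.
rng = np.random.default_rng(3)
Bx, By, Bz = (rng.normal(size=(1024, 1024)) for _ in range(3))
k, Pk = isotropic_power_spectrum(Bx, By, Bz)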
In general, Fig. 5 reveals a number of interesting features, moving in the parameter space from (low-β, high-σ) to (high-β, low-σ). First, the power spectrum is clearly higher in the case of the low-β, high-σ simulation, confirming a more efficient cascade process (Franci et al. 2016). Second, the spectrum is shallower in the sub-ion inertial range (Sahraoui et al. 2009), indicating a more developed turbulence. Finally, and more interestingly, the turbulent cascades terminate at much smaller scales for (low-β, high-σ) simulations, suggesting the existence of thinner current sheets at subproton scales that accelerate particles more efficiently (Pecora et al. 2018).

4. DISCUSSION AND CONCLUSIONS

With the goal of gaining a deeper understanding of the properties of plasmas near astrophysical compact objects, we have employed the PIC Zeltron code to carry out a large campaign of two-dimensional simulations of special-relativistic, decaying plasma turbulence in the trans-relativistic regime. Particularly important in our analysis is the use of a physical mass ratio between electrons and protons and the exploration of a wide range of values in the plasma-β parameter (β = 10−4 − 1.5) and in the magnetization σ (σ = 0.1 − 3.0).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='3 β = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='1 σ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='1 β = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='5 β σ Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Magnetic-field power spectra for three simulations sam- pling important locations in the (β, σ) space of parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Each simulation is marked with a different color and the corresponding location is shown in the inset, which reports also the electron spec- tral index.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Black dashed lines indicate the turbulent power laws, while the circles delimit the boundaries of each turbulent range, which we define as the limits of the power-law scaling;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' the arrows mark the wavevectors associated to the proton-skin depth (kdp = 1) and to the proton Larmor radius (kρp = 1), which is outside the horizontal scale for the red line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' tons and the exploration of a wide range of values in the plasma-β parameter (β = 10−4 − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='5) and in the magnetiza- tion σ (σ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='1−3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Having simulated such a large portion of the space of parameters encountered in astrophysical plas- mas has allowed us to derive analytical fitting functions for the behaviour of a number of important plasma quantities as a function of β and σ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' More specifically, we have presented 2D fitting functions of the electron spectral index κ(β, σ), of the efficiency in generating nonthermal particles E(β, σ), and of the ratio between the electron and proton temperatures T (β, σ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' These expressions provide compact and reasonably accurate descriptions of the behaviour of these microphys- ical plasma properties and can be employed in a number of scenarios involving compact objects and described by macro- physical plasma characteristics.' 
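As an illustration of how such 2D fitting functions can be constructed from a grid of runs, a least-squares fit over (β, σ) might look as follows (a sketch only: the functional form, the run values and the measured indices below are hypothetical placeholders, not the expressions actually derived in this work):

    import numpy as np
    from scipy.optimize import curve_fit

    # hypothetical bilinear model in log10(beta) and log10(sigma);
    # the paper's actual fitting functions have a different form
    def kappa_model(X, a0, a1, a2, a3):
        log_beta, log_sigma = X
        return a0 + a1 * log_beta + a2 * log_sigma + a3 * log_beta * log_sigma

    # (beta, sigma) of a few runs and their measured spectral indices
    beta  = np.array([1e-4, 1e-3, 1e-2, 1e-1, 1e-4, 1e-3])   # placeholder values
    sigma = np.array([0.1,  0.1,  0.1,  0.1,  3.0,  3.0])
    kappa = np.array([3.5,  3.8,  4.2,  4.9,  2.6,  2.9])    # placeholder values

    popt, pcov = curve_fit(kappa_model, (np.log10(beta), np.log10(sigma)), kappa)

The same procedure applies to E(β, σ) and T(β, σ) once the corresponding quantities have been measured from each run.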
Importantly, since they have been derived from first-principle calculations, they represent a considerable improvement over the rather crude and purely empirical expressions employed at the moment in GRMHD simulations. Finally, we have confirmed the suggestion that plasmas with low β and large σ naturally lead to broad turbulent scenarios and are the most efficient in extracting particles from the thermal population and accelerating them (Pecora et al. 2018; Comisso & Sironi 2018). As these simulations represent one of the most systematic PIC explorations of trans-relativistic turbulence, they can be employed in a wide range of astrophysical systems, such as jets and accretion disks around supermassive black holes, and, of course, their imaging (see, e.g., Event Horizon Telescope Collaboration et al. 2019a, 2022a). The formulas provided in this work can be improved by extending the present two-dimensional treatment to three dimensions and thus assessing the role played by dimensionality in studies of this type.

ACKNOWLEDGMENTS

We thank the Referee for the useful comments that have improved our presentation. This research is supported by the ERC Advanced Grant "JETSET: Launching, propagation and emission of relativistic jets from binary mergers and across mass scales" (Grant No. 884631), by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) through the CRC-TR 211 "Strong-interaction matter under extreme conditions" (project number 315477589), and by the State of Hesse within the Research Cluster ELEMENTS (Project ID 500/10.006).
LR acknowledges the Walter Greiner Gesellschaft zur Förderung der physikalischen Grundlagenforschung e.V. through the Carl W. Fueck Laureatus Chair. The simulations were performed on HPE Apollo HAWK at the High Performance Computing Center Stuttgart (HLRS) under the grant BNSMIC.

APPENDIX

In what follows, we provide additional information on our analysis concentrating on three specific aspects: a detailed summary of the properties of the simulations carried out in the campaign, the evidence that stationarity is reached when extracting the spectral information, and a comparison of simulations with different resolutions.

SUMMARY OF SIMULATIONS

Our systematic investigation of the β, σ space of parameters consists of 35 large-scale, high-resolution simulations whose main properties are reported in Table 1. All these simulations were performed in two spatial dimensions with the real electron-to-proton mass ratio, a physical-box size of L ∼ 5461 d_e (where d_e, we recall, is the electron-skin depth) in each of the two spatial directions, and the same electron-to-proton initial temperature, i.e., T_0 = 1. In addition, we have performed six simulations with varying properties with respect to the main ones, reported in Table 2.

Run   σ        β        θ_p      θ_e      λ_D
 1    1.0e-1   1.0e-4   5.0e-6   9.2e-3   9.6e-2
 2    1.0e-1   3.0e-4   1.5e-5   2.7e-2   1.6e-1
 3    1.0e-1   1.0e-3   5.0e-5   9.2e-2   3.0e-1
 4    1.0e-1   3.0e-3   1.5e-4   2.7e-1   5.2e-1
 5    1.0e-1   1.0e-2   5.0e-4   9.2e-1   9.6e-1
 6    1.0e-1   2.0e-2   1.0e-3   1.8e0    1.3e0
 7    1.0e-1   1.0e-1   5.0e-3   9.2e0    3.0e0
 8    1.0e-1   3.0e-1   2.0e-2   3.7e1    6.1e0
 9    1.0e-1   7.0e-1   4.5e-2   8.3e1    9.2e0
10    1.0e-1   1.0e0    6.8e-2   1.2e2    1.1e1
11    1.0e-1   1.5e0    1.0e-1   1.8e2    1.3e1
12    3.0e-1   1.0e-4   1.5e-5   2.7e-2   1.6e-1
13    3.0e-1   3.0e-4   5.0e-5   9.2e-2   3.0e-1
14    3.0e-1   1.0e-3   1.5e-4   2.7e-1   5.2e-1
15    3.0e-1   3.0e-3   5.0e-4   9.2e-1   9.6e-1
16    3.0e-1   1.0e-2   1.5e-3   2.7e0    1.6e0
17    3.0e-1   3.0e-2   5.0e-3   9.2e0    3.0e0
18    3.0e-1   1.1e-1   2.0e-2   3.7e1    6.1e0
19    3.0e-1   3.4e-1   8.0e-2   1.5e2    1.2e1
20    3.0e-1   5.5e-1   2.0e-1   3.7e2    1.9e1
21    1.0e0    1.0e-4   5.0e-5   9.2e-2   3.0e-1
22    1.0e0    3.0e-4   1.5e-4   2.7e-1   5.2e-1
23    1.0e0    1.0e-3   5.0e-4   9.2e-1   9.6e-1
24    1.0e0    3.0e-3   1.5e-3   2.7e0    1.6e0
25    1.0e0    1.0e-2   5.0e-3   9.2e0    3.0e0
26    1.0e0    3.0e-2   1.5e-2   2.7e1    5.2e0
27    1.0e0    1.0e-1   5.0e-2   9.2e1    9.6e0
28    1.0e0    1.6e-1   2.0e-1   3.7e2    1.9e1
29    3.0e0    1.0e-4   1.5e-4   2.7e-1   5.2e-1
30    3.0e0    3.0e-4   5.0e-4   9.2e-1   9.6e-1
31    3.0e0    1.0e-3   1.5e-3   2.7e0    1.6e0
32    3.0e0    3.0e-3   5.0e-3   9.2e0    3.0e0
33    3.0e0    1.0e-2   1.5e-2   2.7e1    5.2e0
34    3.0e0    2.6e-2   5.0e-2   9.2e1    9.6e0
35    3.0e0    5.5e-2   2.0e-1   3.7e2    1.9e1

Table 1. Summary of the physical parameters of our main simulations, which are all performed with the real electron-to-proton mass ratio, equal electron and proton initial temperatures, a resolution of three cells per electron-skin depth (d_e/dx = 3), and a box of size ∼ 5461 d_e in both directions. For each Run we report the magnetization σ, the plasma β, the dimensionless temperatures θ_p and θ_e for protons and electrons respectively, and the Debye length λ_D in units of d_e. In all our simulations we have initialized each computational cell with 10 particles (5 protons and 5 electrons).
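As a quick sanity check on the table, the quoted Debye lengths appear to follow the nonrelativistic estimate λ_D/d_e ≈ √θ_e (an assumed relation, not stated explicitly in the text; e.g. Run 1: √(9.2e-3) ≈ 9.6e-2):

    import numpy as np

    # sample of (theta_e, lambda_D/d_e) pairs taken from Table 1
    theta_e  = np.array([9.2e-3, 2.7e-2, 9.2e-2, 9.2e0, 1.8e2])
    lambda_d = np.array([9.6e-2, 1.6e-1, 3.0e-1, 3.0e0, 1.3e1])
    # assumed relation: lambda_D / d_e ~ sqrt(theta_e)
    print(np.allclose(np.sqrt(theta_e), lambda_d, rtol=0.05))   # True to within ~5%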
Run   σ        β_p      β_e      θ_p      θ_e       T_0      d_e/dx   L/d_e
A1    3.0e-1   3.0e-4   3.0e-2   5.0e-5   9.18e0    1.0e-2   3.0e0    5.46e+3
A2    3.0e-1   3.0e-4   3.0e-3   5.0e-5   9.18e-1   1.0e-1   3.0e0    5.46e+3
A3    3.0e-1   3.0e-4   3.0e-5   5.0e-5   9.18e-3   1.0e+1   3.0e0    5.46e+3
B1    3.0e-1   3.0e-4   3.0e-4   5.0e-5   9.18e-2   1.0e0    3.0e0    2.73e+3
B2    3.0e-1   3.0e-4   3.0e-4   5.0e-5   9.18e-2   1.0e0    6.0e0    2.73e+3
B3    3.0e-1   3.0e-4   3.0e-4   5.0e-5   9.18e-2   1.0e0    1.2e+1   2.73e+3

Table 2. Table of simulations in which we varied different parameters. Runs A1-A3 have different initial T = T_p/T_e (and hence different β_e and θ_e), while all other parameters (σ, β_p, θ_p, d_e/dx, L/d_e) are the same. Runs B1-B3 have different values of the electron-skin depth per dx and use a smaller physical box of 2730 d_e. For each Run we report the magnetization σ, the proton and electron plasma β, the proton and electron dimensionless temperatures θ_p and θ_e, the initial temperature ratio T_0, the number of cells per electron-skin depth (d_e/dx), and the physical box size in terms of the electron-skin depth.

As a first test, to show that our final configuration is independent of the initial electron-to-proton temperature, we have varied T_0 spanning the range [0.001 − 10.0] (see Runs A1-A3 in Table 2 and Fig. 1). Note that for these configurations, the plasma β is different for electrons and protons. Next, we checked that our results are insensitive to the choice of different (higher) resolutions in terms of d_e/dx, increasing the resolution up to d_e/dx = 12 (see Runs B1-B3 in Table 2). In the latter case, we have used a physical box of L/d_e = 2730 in both directions and varied the number of mesh points from (8192)^2 up to (32768)^2. In this last high-resolution configuration, we have followed the dynamics of ∼ 1.1 × 10^11 particles.
STATIONARITY OF SPECTRA

Next, we provide evidence that the computed electron-energy spectra reach a steady state after t/t_A ≳ 1.8 − 2.0, so that the extraction of the spectral index κ and of the efficiency E is both accurate and robust. Figure 6 shows four representative simulations having different values of σ (see Runs 7, 18, 27, and 31 in Table 1). In each case, we plot the electron-energy spectra at different times during the evolution as indicated by the colormap on the right of each of the four panels. Furthermore, marked with black vertical lines of various type are three different values of the Lorentz factor γ − 1 and the corresponding evolutions are shown in the bottom panels for each of the four simulations considered. Clearly, all cases show that by t/t_A ∼ 2.0 the simulations have reached stationarity, with relative time variations that are ≲ 1.5%, so that κ and E can be extracted reliably.

[Figure 6 here: electron-energy spectra at different times for four runs, with bottom panels showing the time evolution over 0.5 ≤ t/t_A ≤ 2.5 at three fixed values of γ − 1; the panel titles include (σ = 0.3, β = 0.11) and (σ = 3.0, β = 0.001).]

Figure 6. Four representative simulations in which we show the stationarity of the electron-energy spectra (see Runs 7, 18, 27, and 31 in Table 1). For each simulation, we report the spectra at different times during the evolution as indicated by the colormap on the right of each of the four panels. Marked with black vertical lines of various type are three different values of the Lorentz factor γ − 1 and the corresponding evolutions are shown in the bottom panels for each of the four simulations considered. Clearly, all cases show that by t/t_A ∼ 2.0 the simulations have reached stationarity.

RESOLUTION TESTS

Finally, we have verified that our results are insensitive to the choice of spatial resolution. In particular, we have performed three simulations using an increasing number of cells per electron-skin depth, from d_e/dx = 3 up to d_e/dx = 12 (see Runs B1-B3 in Table 2). Figure 7 compares the electron-energy spectra for a case with σ = 0.3 and β = 3 × 10^-4 when varying the number of cells per electron-skin depth, i.e., d_e/dx = 3 − 12. Clearly, the main features of the electron-energy spectra, and in particular the slope, are very similar for the three different resolutions. Indeed, the relative differences between the three spectra are ≲ 6.0% and thus even smaller than the variations due to the stochastic nature of turbulence, which can cause variations in κ up to ∼ 10.0% (Ball et al. 2018).

[Figure 7 here: (γ − 1) dN/dγ versus γ − 1 for d_e/dx = 3, 6 and 12.]

Figure 7. Electron-energy spectra with σ = 0.3 and β = 3 × 10^-4 for three different resolutions d_e/dx = 3, 6 and 12, using a physical box size of L/d_e = 2730. The spectra are computed at t/t_A = 2.0 and clearly show to be nearly insensitive to the increased resolution.

In Figure 8 we show the joint PDFs for the ratio of temperatures T and the plasma β_tot = β_e + β_p for the same runs. In the inset we report a zoom-in of the central region of the PDFs at the final time of t = 2 t_A. The color-coded contours report the 90%-value for each distribution, while the circles represent the maximum of each joint PDF. One can see that for the three different resolutions we obtain similar final distributions, with a variation in T ≲ 5.0%.
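For reference, the kind of measurement involved in extracting κ and E from a spectrum like those of Figs. 6 and 7 can be sketched as follows (an illustrative sketch only: the fitting interval and the definition adopted for the nonthermal fraction are assumptions, not the exact procedure used in the paper):

    import numpy as np

    def spectral_index_and_efficiency(gamma, dn_dgamma,
                                      fit_range=(50.0, 500.0), gamma_nt=10.0):
        """gamma: array of (gamma - 1) bin centres; dn_dgamma: dN/dgamma.
        fit_range and gamma_nt are assumed, illustrative choices."""
        mask = (gamma > fit_range[0]) & (gamma < fit_range[1]) & (dn_dgamma > 0)
        # power-law index from a straight-line fit in log-log space
        slope, _ = np.polyfit(np.log10(gamma[mask]), np.log10(dn_dgamma[mask]), 1)
        kappa = -slope
        # "efficiency" here taken as the fraction of particles above gamma_nt
        n_tot  = np.trapz(dn_dgamma, gamma)
        n_tail = np.trapz(dn_dgamma[gamma > gamma_nt], gamma[gamma > gamma_nt])
        return kappa, n_tail / n_tot

Applied at t/t_A ≃ 2.0, when the spectra are stationary, such a fit is what makes the extracted κ and E robust to the residual time variability quoted above.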
As a concluding remark, we note that the power spectrum in Fig. 5 has been computed on a down-sampled grid of (1024)² points and not on the full-resolution data of (16348)² points. This coarse-graining operation is routinely done in such expensive simulations, for two distinct reasons. First, the large particle noise due to the high temperatures reached essentially blurs out the smallest scales, so that using the full resolution does not really provide any additional information. Second, the downsampling allows us to reduce by a factor of 16² ∼ 250 the space needed for the output (we recall that we save data for 38 fields at very high cadence). As a result, while the maximum wavenumber of the simulation is kmax de = 9.4 and is not shown in the spectrum in Fig. 5, the maximum wavenumber in the downsampled spectrum is kmax de = 0.6 and is well captured.
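The quoted saving is simple bookkeeping: every saved 2-D field shrinks by the square of the linear grid-reduction factor. The short snippet below reproduces that factor and shows one common way of coarse-graining a field by block averaging; the averaging operator and the demo array are our own assumptions, since the text does not state how the down-sampling is actually performed.

import numpy as np

def coarse_grain(field, factor):
    # Block-average a 2-D field over non-overlapping factor x factor patches.
    nx, ny = field.shape
    assert nx % factor == 0 and ny % factor == 0, "grid must divide evenly"
    return field.reshape(nx // factor, factor,
                         ny // factor, factor).mean(axis=(1, 3))

# Bookkeeping quoted above: reducing the linear size to 1024 points shrinks
# every saved field by roughly 16**2, i.e. the "~250" in the text.
print((16348 / 1024) ** 2)            # ~254.9

# Tiny stand-in field, just to exercise the operator.
demo = np.arange(64.0).reshape(8, 8)
print(coarse_grain(demo, 4).shape)    # (2, 2)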
REFERENCES

Abarca, D., Kluźniak, W., & Sądowski, A. 2018, MNRAS, 479, 3936, doi: 10.1093/mnras/sty1602
Anantua, R., Ressler, S., & Quataert, E. 2020, MNRAS, 493, 1404, doi: 10.1093/mnras/staa318
Arzamasskiy, L., Kunz, M. W., Chandran, B. D. G., & Quataert, E. 2019, ApJ, 879, 53, doi: 10.3847/1538-4357/ab20cc
Ball, D., Sironi, L., & Özel, F. 2018, ApJ, 862, 80, doi: 10.3847/1538-4357/aac820
Bandyopadhyay, B. 2022, Nature Astronomy, 6, 14, doi: 10.1038/s41550-021-01535-5
Çıkıntoğlu, S., Ekşi, K. Y., & Rezzolla, L. 2022, arXiv e-prints, arXiv:2204.12275. https://arxiv.org/abs/2204.12275
Cerri, S. S., Servidio, S., & Califano, F. 2017, ApJL, 846, L18, doi: 10.3847/2041-8213/aa87b0
Cerutti, B., Philippov, A., Parfrey, K., & Spitkovsky, A. 2015, MNRAS, 448, 606, doi: 10.1093/mnras/stv042
Cerutti, B., & Werner, G. 2019, Zeltron: Explicit 3D relativistic electromagnetic Particle-In-Cell code, Astrophysics Source Code Library, record ascl:1911.012. http://ascl.net/1911.012
Chatterjee, K., Markoff, S., Neilsen, J., et al. 2021, MNRAS, 507, 5281, doi: 10.1093/mnras/stab2466
Comisso, L., & Sironi, L. 2018, PhRvL, 121, 255101, doi: 10.1103/PhysRevLett.121.255101
—. 2019, ApJ, 886, 122, doi: 10.3847/1538-4357/ab4c33
Cruz-Osorio, A., Fromm, C. M., Mizuno, Y., et al. 2022, Nature Astronomy, 6, 103, doi: 10.1038/s41550-021-01506-w
Das, P., Porth, O., & Watts, A. 2022, arXiv e-prints, arXiv:2204.00249. https://arxiv.org/abs/2204.00249
Davelaar, J., Olivares, H., Porth, O., et al. 2019, A&A, 632, A2, doi: 10.1051/0004-6361/201936150
Del Zanna, L., Tomei, N., Bugli, M., & Bucciantini, N. 2020, in Journal of Physics Conference Series, Vol. 1623, 012004, doi: 10.1088/1742-6596/1623/1/012004
Dihingia, I. K., Mizuno, Y., Fromm, C. M., & Rezzolla, L. 2022, arXiv e-prints, arXiv:2206.13184. https://arxiv.org/abs/2206.13184
Dong, C., Wang, L., Huang, Y.-M., Comisso, L., & Bhattacharjee, A. 2018, PhRvL, 121, 165101, doi: 10.1103/PhysRevLett.121.165101
Drake, J. F., Cassak, P. A., Shay, M. A., Swisdak, M., & Quataert, E. 2009, ApJL, 700, L16, doi: 10.1088/0004-637X/700/1/L16
Event Horizon Telescope Collaboration, Akiyama, K., Alberdi, A., et al. 2019a, ApJL, 875, L5, doi: 10.3847/2041-8213/ab0f43
—. 2019b, ApJL, 875, L1, doi: 10.3847/2041-8213/ab0ec7
—. 2022a, ApJL, 930, L16, doi: 10.3847/2041-8213/ac6672
—. 2022b, ApJL, 930, L12, doi: 10.3847/2041-8213/ac6674
Franci, L., Landi, S., Matteini, L., Verdini, A., & Hellinger, P. 2016, ApJ, 833, 91, doi: 10.3847/1538-4357/833/1/91
Fromm, C. M., Mizuno, Y., Younsi, Z., et al. 2021, A&A, 649, A116, doi: 10.1051/0004-6361/201937335
Fromm, C. M., Cruz-Osorio, A., Mizuno, Y., et al. 2022, A&A, 660, A107, doi: 10.1051/0004-6361/202142295
Haynes, C. T., Burgess, D., & Camporeale, E. 2014, ApJ, 783, 38, doi: 10.1088/0004-637X/783/1/38
Hellinger, P., Matteini, L., Landi, S., et al. 2015, ApJL, 811, L32, doi: 10.1088/2041-8205/811/2/L32
Howes, G. G. 2010, MNRAS, 409, L104, doi: 10.1111/j.1745-3933.2010.00958.x
Huang, Y.-M., & Bhattacharjee, A. 2016, ApJ, 818, 20, doi: 10.3847/0004-637X/818/1/20
Janssen, M., Falcke, H., Kadler, M., et al. 2021, Nature Astronomy, 5, 1017, doi: 10.1038/s41550-021-01417-w
Karimabadi, H., Roytershteyn, V., Wan, M., et al. 2013, Physics of Plasmas, 20, 012303, doi: 10.1063/1.4773205
Kawazura, Y., Barnes, M., & Schekochihin, A. A. 2019, Proceedings of the National Academy of Sciences, 116, 771, doi: 10.1073/pnas.1812491116
Kawazura, Y., Schekochihin, A. A., Barnes, M., et al. 2020, arXiv e-prints, arXiv:2004.04922. https://arxiv.org/abs/2004.04922
Mizuno, Y., Fromm, C. M., Younsi, Z., et al. 2021, MNRAS, 506, 741, doi: 10.1093/mnras/stab1753
Mościbrodzka, M., Falcke, H., & Shiokawa, H. 2016, A&A, 586, A38, doi: 10.1051/0004-6361/201526630
Nathanail, A., Fromm, C. M., Porth, O., et al. 2020, MNRAS, 495, 1549, doi: 10.1093/mnras/staa1165
Nättilä, J., & Beloborodov, A. M. 2022, PhRvL, 128, 075101, doi: 10.1103/PhysRevLett.128.075101
Parashar, T. N., Matthaeus, W. H., & Shay, M. A. 2018, ApJL, 864, L21, doi: 10.3847/2041-8213/aadb8b
Parfrey, K., & Tchekhovskoy, A. 2017, ApJL, 851, L34, doi: 10.3847/2041-8213/aa9c85
Pecora, F., Pucci, F., Lapenta, G., Burgess, D., & Servidio, S. 2019, SoPh, 294, 114, doi: 10.1007/s11207-019-1507-6
Pecora, F., Servidio, S., Greco, A., et al. 2018, Journal of Plasma Physics, 84, 725840601, doi: 10.1017/S0022377818000995
Pezzi, O., Pecora, F., Le Roux, J., et al. 2021, Space Sci. Rev., 217, 39, doi: 10.1007/s11214-021-00799-7
Porth, O., Chatterjee, K., Narayan, R., et al. 2019, ApJS, 243, 26, doi: 10.3847/1538-4365/ab29fd
Qian, Q., Fendt, C., & Vourellis, C. 2018, ApJ, 859, 28, doi: 10.3847/1538-4357/aabd36
Radice, D., & Rezzolla, L. 2013, ApJ, 766, L10, doi: 10.1088/2041-8205/766/1/L10
Rezzolla, L., & Zanotti, O. 2013, Relativistic Hydrodynamics (Oxford, UK: Oxford University Press), doi: 10.1093/acprof:oso/9780198528906.001.0001
Ripperda, B., Liska, M., Chatterjee, K., et al. 2022, ApJL, 924, L32, doi: 10.3847/2041-8213/ac46a1
Ripperda, B., Bacchini, F., Porth, O., et al. 2019, ApJS, 244, 10, doi: 10.3847/1538-4365/ab3922
Rowan, M. E., Sironi, L., & Narayan, R. 2017, ApJ, 850, 29, doi: 10.3847/1538-4357/aa9380
Sahraoui, F., Goldstein, M. L., Robert, P., & Khotyaintsev, Y. V. 2009, PhRvL, 102, 231102, doi: 10.1103/PhysRevLett.102.231102
Servidio, S., Matthaeus, W. H., Shay, M. A., Cassak, P. A., & Dmitruk, P. 2009, PhRvL, 102, 115003, doi: 10.1103/PhysRevLett.102.115003
Servidio, S., Valentini, F., Califano, F., & Veltri, P. 2012, PhRvL, 108, 045001, doi: 10.1103/PhysRevLett.108.045001
Tchekhovskoy, A., & McKinney, J. C. 2012, MNRAS, 423, L55, doi: 10.1111/j.1745-3933.2012.01256.x
Tu, C. Y., & Marsch, E. 1997, SoPh, 171, 363, doi: 10.1023/A:1004968327196
Valentini, F., Servidio, S., Perrone, D., et al. 2014, Physics of Plasmas, 21, 082307, doi: 10.1063/1.4893301
van der Holst, B., Manchester, W. B., IV, Frazin, R. A., et al. 2010, ApJ, 725, 1373, doi: 10.1088/0004-637X/725/1/1373
Werner, G. R., Uzdensky, D. A., Begelman, M.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Cerutti, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', & Nalewajko, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' 2018, MNRAS, 473, 4840, doi: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='1093/mnras/stx2530 Younsi, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Porth, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Mizuno, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Fromm, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', & Olivares, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' 2020, in Perseus in Sicily: From Black Hole to Cluster Outskirts, ed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Asada, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' de Gouveia Dal Pino, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Giroletti, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Nagai, & R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' Nemmen, Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' 342, 9–12, doi: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='1017/S1743921318007263 Zhdankin, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' 2021, ApJ, 922, 172, doi: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='3847/1538-4357/ac222e Zhdankin, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Uzdensky, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Werner, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', & Begelman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' 2019, Physical review letters, 122, 055101 Zhdankin, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Werner, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', Uzdensky, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=', & Begelman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content=' 2017, PhRvL, 118, 055103, doi: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='1103/PhysRevLett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='118.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} +page_content='055103' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FtE0T4oBgHgl3EQfzQIy/content/2301.02669v1.pdf'} diff --git a/GNAyT4oBgHgl3EQfSvfA/vector_store/index.faiss b/GNAyT4oBgHgl3EQfSvfA/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..89a8d8632e06b279ba1fb06b445d8cbfd7f70e40 --- /dev/null +++ b/GNAyT4oBgHgl3EQfSvfA/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ba824056a343e2ed7974391fcce912df08dd4a0249a1e6f6021b0e8c6da4823 +size 5505069 diff --git a/GNAzT4oBgHgl3EQfHPvT/content/tmp_files/2301.01043v1.pdf.txt b/GNAzT4oBgHgl3EQfHPvT/content/tmp_files/2301.01043v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..ab9729cd0501686c6760d0015080adfd4fd3cf9d --- /dev/null +++ b/GNAzT4oBgHgl3EQfHPvT/content/tmp_files/2301.01043v1.pdf.txt @@ -0,0 +1,703 @@ +OFFSHORE-WIND ENERGY | RENEWABLE ENERGY +Wind energy potential of the German Bight
Limits and consequences of large-scale offshore wind energy use

Axel Kleidon

The wind blows stronger and more reliably over the sea than over land. Offshore wind energy is therefore expected to make a major contribution to the energy transition in Germany, especially in the German Bight. But what happens when a growing number of wind farms extract more and more wind energy from the atmosphere?

The challenges of the energy transition for the coming decades in Germany are enormous. It is true that 15.9 % of primary energy demand was already covered by renewable energy in 2021 [1], and a lower energy demand is expected in the future thanks to more efficient technologies such as heat pumps and electromobility. However, the transition to a complete, sustainable energy system that is free of fossil fuels is still a long way off.

Many energy transition scenarios focus on the combined expansion of solar and wind energy. These two types of renewable energy have the greatest potential in Germany [2] and complement each other very well over the course of the year: while the Sun supplies a particularly large amount of renewable energy in summer, it contributes little in winter. Wind energy can compensate for this, as the dark winter months are usually stormier than the summer.

Wind power generation at sea plays a special role in these scenarios. Wind blows stronger and more steadily at sea than on land, so it can generate electricity more efficiently and reliably. In Germany, expansion is planned mainly in the German Bight of the North Sea, where the exclusive economic zone - i.e. the part of the sea that is administered by Germany beyond the territorial sea - offers considerably more surface area than the Baltic Sea. For example, wind farms with 6.7 GW of installed capacity are currently located in the North Sea, compared to only 1.1 GW in the Baltic Sea (as of 2021, [3]). In 2021, these wind farms contributed about 24 TWh/a, or 4.9 %, to the German electricity demand of 491 TWh/a, which means that the turbines were utilized to an average of 35 % - the so-called capacity factor [3]. Wind turbines at sea were thus almost twice as productive as on land, where the capacity factor was only 18 %.

By 2050, the use of offshore wind energy is assumed to increase significantly more than onshore. In its coalition agreement, the German government has targeted the expansion of offshore wind energy to 70 GW, roughly a tenfold increase over the currently installed capacity. Onshore, there is already 56 GW of turbine capacity, and an expansion to around 200 GW is expected, distributed over 2 % of the country's surface area. However, with 357,000 km² the land area is considerably larger than the exclusive economic zone of the North Sea, which covers only 28,600 km². The plans thus envisage a much more intensive use of wind energy at sea than on land. And because each wind turbine draws energy from the atmosphere and thereby weakens the winds, the question arises whether, with such a strong expansion, the turbines could take the wind away from each other and thus endanger the high yields.

Translated version. Originally published in German - cite as: Kleidon, A. (2023) "Windenergie in der Deutschen Bucht", Physik in unserer Zeit, 54(1), 30-36. https://doi.org/10.1002/piuz.202201654

Wind energy in the German Bight

This question was examined in a report by Agora Energiewende on the wind energy potential of the North Sea [5].
I worked on this report scientifically and want to present its results here in a comprehensible way. The study has also already been taken into account in the current, official planning of offshore wind energy in Germany. In the following, I go through the steps necessary to determine the potential for electricity generation from wind energy in the German Bight. In particular, I want to make the effect of wind extraction by the turbines physically plausible.

In the first step, we determined the areas that are potentially available for the expansion of wind energy (Figure 1). The sea has a whole range of competing uses: shipping needs routes, certain areas are designated as nature reserves, some areas are used for military purposes, and space is needed for submarine cables and supply lines. These areas preclude wind energy use, which significantly reduces the total area available. The usable areas can be roughly divided into two regions separated by a wide shipping route: the coastal Area 1 (blue in Figure 1) with 2767 km² and the far-from-coast Area 2 (red) with 4473 km².

FIG. 1 Areas of the German Bight that can be used for the development of wind energy. The areas close to the coast in dark blue are referred to here as Area 1, the red areas far from the coast as Area 2. Black and white circle: position of the FINO-1 measuring station (map modified according to [5]).

Next, we need technical information on the turbines that will be placed in these areas. For this purpose, we choose a hypothetical 12 MW turbine with a rotor diameter of 200 m, which corresponds to the specifications of the currently most powerful turbines. The power generation of a single turbine is described by the so-called power curve. It shows how much electricity an isolated turbine produces at a prevailing wind speed, the so-called wind yield (Figure 2c). The dependence on the wind speed can be roughly divided into four ranges: in calm conditions below the cut-in velocity of 3 m/s, the turbine produces no electricity. In the second range, up to the rated velocity of 11.5 m/s, the output increases proportionally to the kinetic energy flux density given by (1/2) ρ v³, with the air density ρ = 1.2 kg/m³ and the wind speed v in m/s. The energy flux density is then multiplied by the cross-sectional area spanned by the rotor and by the power coefficient of about 0.42 (that is, 42 % of the kinetic energy flux density can be used) to determine the yield in this range. In the third range, above the rated wind speed and up to the cut-out velocity of 28 m/s, the yield is determined by the capacity of the generator. Above this wind speed, the turbine is shut down to protect against damage and does not generate any electricity. A minimal numerical sketch of such a power curve is given below.
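To make the four ranges of the power curve concrete, here is a minimal Python sketch; the function and variable names are mine and not from the report, and real power curves are smoother around the rated speed than this idealized version:

import math

RHO = 1.2           # air density near the sea surface, kg/m³
CP = 0.42           # power coefficient: fraction of the kinetic energy flux used
ROTOR_DIAMETER = 200.0                               # m
ROTOR_AREA = math.pi * (ROTOR_DIAMETER / 2.0) ** 2   # ≈ 31 416 m²
CAPACITY = 12.0e6   # generator capacity, W
V_CUT_IN, V_RATED, V_CUT_OUT = 3.0, 11.5, 28.0       # m/s

def turbine_power(v):
    """Electric power in W of the isolated 12 MW turbine at wind speed v (m/s)."""
    if v < V_CUT_IN or v > V_CUT_OUT:
        return 0.0                                   # calm, or storm shutdown
    if v < V_RATED:
        # second range: follows the kinetic energy flux density (1/2) rho v³
        return 0.5 * RHO * v ** 3 * ROTOR_AREA * CP
    return CAPACITY                                  # third range: generator limit

print(turbine_power(9.4) / 1e6)    # ≈ 6.6 MW at the median wind speed of 9.4 m/s
print(turbine_power(11.5) / 1e6)   # 12 MW: the rated capacity is reached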
The average yield of the wind turbine is then determined by combining the power curve with the frequency distribution of wind speeds. For this purpose, we used the frequency distribution of wind speeds (Figure 2a) measured by the FINO-1 measuring station in the German Bight at a height of 100 m over the period 2004-2015; its position is marked in Figure 1. These data show that the absence of wind is relatively rare, at an average of 5.8 % of the time; the second range, in which the yield depends directly on the wind speed, is the most frequent at 61.5 %; 32.6 % of the time the turbine operates at its capacity; and in only 0.1 % of the time does the turbine have to be shut down because of excessive wind speeds.

FIG. 2 WIND IN THE GERMAN BIGHT. Wind conditions in the German Bight and their use by an isolated wind turbine. a) The frequency distribution shows wind measurements 2004-2015 at 100 m height on FINO-1 in the North Sea [4]; for the position of this measuring station see Figure 1. b) The seasonal course of wind speeds over the months is shown by the median, where the area highlighted in blue covers 25-75 % of the distribution. c) Yield of an isolated 12 MW wind turbine as a function of wind speed and d) its seasonal variation in the North Sea. Highlighted in light grey on the left is the range where wind yield increases with wind speed (61.5 % of the time), while in the range highlighted in dark grey the turbine operates at its capacity limit (32.6 % of the time).

In total, the turbine generates an average of 6.8 MW of electrical power, or 59.1 GWh of electrical energy per year. The efficiency of the energy generation can be described, on the one hand, by the full load hours, with which the annual yield is simply the product of the full load hours and the capacity of the turbine: the annual yield is thus 12 MW × x h/a = 59.1 GWh/a with x = 4928 full load hours per year. On the other hand, the efficiency can be described by the capacity factor, the ratio of the average yield to the capacity of the turbine; in our case it is 6.8 MW / 12 MW = 56.7 %. The efficiency - or the capacity factor - is determined not only by the technical specification of the turbine but also by the wind conditions. For example, the capacity factor in Germany on land is only about 20 % [6]. A short numerical sketch of these quantities follows below.
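As a plausibility check, the relation between average power, capacity factor, and full load hours can be written out in a few lines. This is only a rough sketch using the rounded values quoted in the text, so the results deviate slightly from the 59.1 GWh/a and 4928 h/a given above:

HOURS_PER_YEAR = 8760.0
CAPACITY_MW = 12.0

# In the report the average power follows from weighting the power curve with the
# measured FINO-1 wind-speed distribution; here we simply start from the rounded
# average of about 6.8 MW quoted in the text.
mean_power_mw = 6.8

capacity_factor = mean_power_mw / CAPACITY_MW                # ≈ 0.567, i.e. 56.7 %
annual_yield_gwh = mean_power_mw * HOURS_PER_YEAR / 1000.0   # ≈ 59.6 GWh/a
full_load_hours = capacity_factor * HOURS_PER_YEAR           # ≈ 4960 h/a

print(capacity_factor, annual_yield_gwh, full_load_hours)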
In principle, the yield is also subject to seasonal fluctuations, with higher yields in winter than in summer (Figure 2d).

Next, we considered different scenarios in which the two areas are equipped with different numbers of wind turbines. Three scenarios rely solely on Area 1, because its proximity to the coast makes the costs of installation, supply, and connection to the power grid lower. These scenarios consider installation densities of 5, 10 and 20 MW per square kilometre; with an area of 2767 km², this corresponds to 1153, 2306, and 4612 turbines of 12 MW capacity each.

In five further scenarios, we consider both areas with installation densities of 5, 7.5, 10, 12.5, and 20 MW/km², with 3017 to 12067 turbines distributed evenly over the 7240 km² of both areas combined. This gives us a total of eight scenarios, covering a range of 14 to 145 GW of installed capacity. The German government's expansion target of 70 GW is thus well covered.

Wind yield estimation

Next, we determined the total yield of the installed turbines for the different scenarios. A seemingly obvious way to do so would be to simply multiply the yield of the isolated turbine by the number of turbines. This gives the theoretical yields shown by the light bars in Figure 3. This type of estimation is currently widely used; sometimes it is reduced by an empirically determined park loss factor of 10 %, and sometimes it is even expected that technological progress will further increase turbine efficiency. The scenarios then result in a wind yield of 7.8 to 82.1 GW, or 68.2 to 713.6 TWh/a. By comparison, electricity consumption in Germany in 2021 was around 491 TWh/a [3].

However, this way of calculating yields does not take into account that wind turbines extract a considerable amount of kinetic energy from the atmosphere. This weakens the wind and thus the average efficiency of the turbines in the region. We can easily see this by looking at the kinetic energy fluxes of the region (box "KEBA: Kinetic Energy Balance of the Atmosphere"). There are two inputs into the lower atmosphere of the region, the so-called boundary layer, which over the North Sea is about 700 m thick: the first contribution comes from the horizontal flow into the region, the second from above through vertical mixing.

Area 1 in Figure 1 has an area of 2767 km². In the following we simplify it as a square with a side length of about 52.6 km. If we assume a wind speed of 9.4 m/s, which corresponds to the median of the frequency distribution in Figure 2a, this lies in the range where the wind yield increases with wind speed (Figure 2c). Thus, about 52.6 × 10³ m × 7 × 10² m × (0.5 × 1.2) kg/m³ × (9.4 m/s)³ ≈ 18.3 GW flows into the area through its upwind face, while the vertical replenishment is relatively small at about 2.8 GW (see equations (2) and (3) in the box "KEBA: Kinetic Energy Balance of the Atmosphere"). In total, about 21.1 GW of kinetic energy enters Area 1 at this wind speed, which is already quite close to the installed capacity of 14 GW in the smallest scenario for Area 1 with 1153 turbines. We can therefore see that the wind turbines will extract an appreciable amount of kinetic energy from the region, and that this effect must be taken into account. A short numerical check of this estimate follows below.
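This back-of-envelope estimate can be reproduced with a few lines of Python; the variable names are mine, and the parameter values follow the text above:

RHO = 1.2        # air density, kg/m³
CD = 0.001       # drag coefficient over the sea
H = 700.0        # boundary layer height, m
W = L = 52.6e3   # Area 1 treated as a square of ~52.6 km side length, m
V_IN = 9.4       # median wind speed, m/s

ke_flux_density = 0.5 * RHO * V_IN ** 3           # ≈ 500 W/m²
j_in_horizontal = ke_flux_density * W * H         # influx through the upwind face
j_in_vertical = RHO * CD * V_IN ** 3 * W * L      # replenishment from above

print(j_in_horizontal / 1e9, j_in_vertical / 1e9)
# ≈ 18.3 GW and ≈ 2.8 GW, about 21 GW in total - comparable to the 14 GW of
# installed capacity in the smallest Area 1 scenario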
For estimating the yields of the different scenarios, we can use the balance of kinetic energy fluxes in our virtual box (box "KEBA: Kinetic Energy Balance of the Atmosphere" [7], and Figure 4). The estimates from this approach are shown by the blue bars in Figure 3. The orange bars come from calculations with a much more complex numerical weather prediction model. As we can see, the results from both methods are very similar. Looking at the energy fluxes in the atmosphere is thus the key to understanding the reduced yields under strong wind energy use.

For a complete balance of the kinetic energy flows, we also need to look at the loss terms. In addition to the extraction of energy by the turbines, there are the friction losses in the wakes of the turbines, surface friction, and the export of kinetic energy into the areas downwind of the wind farms. The effect of wind extraction can be represented comparatively simply by a reduction factor, since all of these components depend on the kinetic energy flux density.

FIG. 3 ELECTRICITY YIELD OF DIFFERENT SCENARIOS. Electricity yield of different offshore wind energy expansion scenarios in the German Bight, from 14 to 145 GW of installed capacity, without (light) and with (dark) the extraction of wind energy by the turbines. The blue estimates are based on the KEBA approach (see box "KEBA: Kinetic Energy Balance of the Atmosphere"), while the orange estimates are based on calculations with a much more complex numerical weather prediction model (WRF). The vertical black line represents Germany's electricity consumption in 2021. As a comparison: the German government's expansion target for offshore wind energy is 30 GW of installed capacity for 2030 and 70 GW for 2050 (data from [5]).

This reduction factor depends primarily on the size of our virtual box and the number of turbines (see formula (10) in the box "KEBA: Kinetic Energy Balance of the Atmosphere"). It reduces the yield especially at low wind speeds, because there the yield depends strongly on the wind speed. At high wind speeds, much more kinetic energy enters our box, since the kinetic energy fluxes depend on the third power of the wind speed; in this case the turbines operate at their capacity, so lowering the wind speed does not affect their yield as much.

Figure 5 shows an example of how the various contributions in the kinetic energy balance change across the scenarios. The natural case without wind energy use is also included; here, the input of kinetic energy is balanced by surface friction and downwind export.
The more wind energy is used in the areas, the more the terms shift towards electricity generation (yellow in Figure 5) and frictional losses in the wakes (orange). This comes at the expense of surface friction and export. These two terms are directly coupled to the wind speed, so the wind speed must decrease. This can clearly be seen in the frequency distribution of wind speeds (Figure 6), which shifts towards lower values with greater use.

FIG. 4 KINETIC ENERGY BALANCE OF THE ATMOSPHERE. Kinetic energy balance of the atmosphere over the region in which wind energy is used, from which an effective wind speed can be calculated: horizontal influx Jin,h, vertical input Jin,v, downwind export Jout,h, surface friction Dfric, electricity generation Gturb and wake dissipation Dwake, for a box of width W, length L and boundary layer height H. See the box "KEBA: Kinetic Energy Balance of the Atmosphere" for an explanation of the symbols and of how this calculation is done.

KEBA: Kinetic Energy Balance of the Atmosphere

The effect of wind extraction by turbines can be described quite simply and physically with the help of the kinetic energy balance of the lower atmosphere [7]. For this purpose, we consider the air volume above the area of the planned wind farms (Figure 4), with width W, length L and height H. This height comprises the boundary layer, in which the lower atmosphere is well mixed; over the North Sea it is usually around 700 m.

We now consider the components that contribute, export, or convert kinetic energy in this volume. These are the kinetic energy inputs from upwind areas and from above, Jin,h (dark blue arrow in the figure) and Jin,v (light blue arrow), the export Jout,h downwind (purple arrow), the frictional loss due to surface friction Dfric (red arrow), the extraction by the turbines for power generation Gturb (yellow arrow), and the frictional losses in the wakes due to the mixing of surrounding air masses Dwake (orange arrow):

Jin,h + Jin,v = Jout,h + Dfric + Gturb + Dwake.   (1)

The horizontal input of kinetic energy is described by

Jin,h = [(ρ/2) vin³] × W × H.   (2)

The expression in brackets describes the kinetic energy flux density, with the air density ρ of about 1.2 kg/m³ near the sea surface and the wind speed vin. The input of kinetic energy from the free atmosphere above the boundary layer by vertical mixing can be described through the friction loss at the surface in the absence of wind turbines, because then these two terms balance. This gives

Jin,v = ρ Cd vin³ × W × L,   (3)

where Cd is the drag coefficient, typically about 0.001 over the sea.

If wind turbines are present, we describe the wind speed within the volume by an effective speed v. It will be lower than vin, because the wind turbines change the kinetic energy balance of the volume. We use this effective wind speed to describe the other four terms of the balance. We write the export of kinetic energy into downwind areas, analogous to (2), as

Jout,h = [(ρ/2) v³] × W × H.   (4)

For the friction loss we write, similar to (3),

Dfric = ρ Cd v³ × W × L.   (5)
The power generation, or yield, of the wind turbines in the range in which the power depends on the wind speed is given by

Gturb = [(ρ/2) v³] × η × Arotor × N,   (6)

where η is the power coefficient of the turbine, typically η ≈ 0.42, Arotor is the cross-sectional area spanned by the rotor blades, which for our 12 MW turbine is about 31 415 m², and N is the number of wind turbines.

For the friction loss in the turbine wakes, we assume 50 % of the power extracted from the wind by the turbines as a realistic value, so that

Dwake = 0.5 × Gturb.   (7)

The four terms on the right-hand side of the kinetic energy balance (1) all depend on v³, so we can easily obtain the effective wind speed by rearranging the equation. This gives

v = fred^(1/3) × vin,   (8)

and the amount of electricity generated becomes

Gturb = fred × [(ρ/2) vin³] × η × Arotor × N.   (9)

Here, fred is a reduction factor describing the effect of wind extraction from the volume:

fred = (H + 2 Cd L) / (H + 2 Cd L + (3/2) η Arotor (N − 1)/W).   (10)

Note that for an isolated turbine (N = 1) this factor is 1, so there is no yield reduction. The higher the number of turbines and the larger the rotor area, the smaller the factor becomes: the wind is weakened and the yield is reduced. For the case in which the turbines operate at their capacity, a similar expression can be derived.

The application to the 72 GW scenario is briefly illustrated here: with H = 700 m, W = L ≈ 85 090 m, Cd = 0.001, η = 0.42, and N = 6033, this results in a factor of fred = 870 m / (870 m + 1404 m) = 0.38. When the yield depends on the wind speed, this factor implies that wind extraction causes the yield to drop to 38 %, a 62 % reduction, while the wind speed has only dropped by 28 %. However, this is only a partial aspect of the overall yield, as there are still times when the turbines operate at their capacity; therefore, the reduction in Figure 3 is less dramatic at 40 %. The various components of the kinetic energy balance can then be determined by combining the observed energy flux density (505 W/m² in the median, at vin = 9.4 m/s) with these parameters and equations.

These KEBA calculations are available as a spreadsheet for yield estimates on the Internet [8]. A minimal script that reproduces the 72 GW example is shown below.
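As an illustration, here is a minimal Python sketch of equation (10) applied to the 72 GW example. The function and variable names are mine; the expression only covers the regime in which the yield depends on the wind speed, while the full KEBA approach also treats the capacity-limited regime, for which a similar expression can be derived:

def keba_reduction_factor(H, L, W, Cd, eta, A_rotor, N):
    """Reduction factor f_red of equation (10), valid in the wind-speed-dependent regime."""
    drag = H + 2.0 * Cd * L
    turbines = 1.5 * eta * A_rotor * (N - 1) / W
    return drag / (drag + turbines)

# Parameters of the 72 GW scenario quoted above
H = 700.0           # boundary layer height, m
W = L = 85090.0     # both areas combined, treated as a square, m
Cd = 0.001          # drag coefficient over the sea
eta = 0.42          # turbine power coefficient
A_rotor = 31415.0   # rotor cross-sectional area, m²
N = 6033            # number of 12 MW turbines

f_red = keba_reduction_factor(H, L, W, Cd, eta, A_rotor, N)
print(round(f_red, 2))                 # ≈ 0.38: yields in this regime drop to about 38 %
print(round(f_red ** (1.0 / 3.0), 2))  # ≈ 0.73: the effective wind speed drops by roughly 27 %,
                                       # in line with the ~28 % quoted above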
FIG. 5 KINETIC ENERGY COMPONENTS. Components of the kinetic energy balance for the scenarios of Figure 3, from 0 GW (no wind energy use) up to 145 GW of installed capacity; the upper, light blue section again applies to Area 1 alone, the lower, white section to Areas 1 and 2 together. The values are estimated with the KEBA approach (colouring as in Figure 4; explanation of the symbols in the legend and in the box "KEBA: Kinetic Energy Balance of the Atmosphere").

Conclusions

Overall, this gives us a differentiated picture of the contribution that offshore wind energy can make to the energy transition. On the one hand, the potential to generate electricity is huge, even with the associated, significant reductions due to the wind extraction by the turbines. For example, the 72 GW scenario can cover more than a third of Germany's current electricity consumption. On the other hand, the use is much more efficient if wind farms are less dense and distributed over larger areas. This can be seen in a direct comparison of the scenarios with 55 GW installed in Area 1 and 54 GW installed in both areas (Figures 3 and 5): in the latter case, the reduction effect is much smaller, as the turbines are distributed over a much larger area.

This weakening effect will therefore play an increasingly important role in the expansion of offshore wind energy. It is independent of the technology, the size of the turbines, or the positioning of the turbines within the wind farm. After all, the main effect has to do with what the turbines are there for: to extract energy from the wind in order to generate electricity.

FIG. 6 WIND SPEEDS. Frequency distribution of wind speeds for three scenarios (14, 28 and 72 GW, together with the natural case of 0 GW), illustrating the shift to lower wind speeds with more wind energy use. The numerical values indicate the median of the respective distributions.

Summary

A significant contribution to the energy transition is expected from offshore wind energy in the German Bight. Due to the strong and steady winds, offshore electricity generation appears to be very efficient.
For 2050, the German government assumes an installed capacity of 70 gigawatts, a tenfold increase compared to today. But what happens when so many wind turbines draw their energy from the wind? This can easily be determined with the help of the kinetic energy balance of the atmosphere above the wind farms. Since the input of kinetic energy is limited, the more wind energy is used, the lower the wind speeds in the region must become, and with them the efficiency of the turbines. Less electricity is therefore generated than would be expected without this effect; at 70 GW, electricity generation would be reduced by as much as 40 %. Still, it could meet a large part of the current electricity demand. For the efficient use of wind energy at sea, it is therefore advisable to plan wind farms as widely dispersed as possible in order to reduce their influence on the wind fields.

Keywords
Wind energy, offshore, energy transition, renewable energy, full load hours, kinetic energy, wind speed, power rating, capacity factor, kinetic energy balance of the atmosphere (KEBA).

The author
Axel Kleidon studied physics and meteorology at the University of Hamburg and Purdue University, Indiana, USA. He received his doctorate from the Max Planck Institute for Meteorology in 1998 for his work on the influence of deep-rooted vegetation on the climate system. He subsequently conducted research at Stanford University in California and at the University of Maryland. Since 2006, he has headed the independent research group "Biospheric Theory and Modelling" at the Max Planck Institute for Biogeochemistry in Jena. His research interests range from the thermodynamics of the Earth system to the natural limits of renewable energy sources.

Address
Dr. Axel Kleidon, Max Planck Institute for Biogeochemistry, Postfach 10 01 64, 07701 Jena. axel.kleidon@bgc-jena.mpg.de

Literature
[1] https://ag-energiebilanzen.de/daten-und-fakten/primaerenergieverbrauch.
[2] A. Kleidon, Physik in unserer Zeit 2019, 50(3), 120.
[3] https://www.energy-charts.info/downloads/Stromerzeugung_2021.pdf.
[4] http://www.fino1.de.
[5] https://www.agora-energiewende.de/en/publications/making-the-most-of-offshore-wind.
[6] S. Germer, A. Kleidon, PLoS ONE 2019, 14(2), e0211028.
[7] A. Kleidon, L. M. Miller, Geosci. Model Dev. 2020, 13, 4993.
[8] A. Kleidon, The Kinetic Energy Budget of the lower Atmosphere (KEBA) Model: Files from the project 'Making the most of offshore wind', commissioned by Agora Energiewende and Agora Verkehrswende, 2020, https://doi.org/10.17617/3.3h.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' This can be compensated for by wind energy, as the dark winter months are usually stormier than the summer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Wind power generation at sea plays a special role in these scenarios.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Wind blows stronger and more continuously at sea than on land, so it can generate electricity more efficiently and reliably.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' In Germany, expansion is planned mainly in the German Bight of the North Sea, where the exclusive economic zone - i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' the part of the sea that is administered by Germany beyond the territorial sea - offers considerably more surface area than the Baltic Sea.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' For example, wind farms with 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='7 GW of installed capacity are currently located in the North Sea, compared to only 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='1 GW in the Baltic Sea (as of 2021, [3]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' In 2021, these wind farms contributed about 24 TWh/a or 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='9 % to the German electricity demand of 491 TWh/a, which means that the turbines were utilized to an average of 35 % - the so-called capacity factor [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Wind turbines at sea were thus almost twice as productive as on land, where the capacity factor was only 18 %.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' By 2050, it is assumed that the use of offshore wind energy will increase significantly more than on land, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' onshore.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' In its coalition agreement, the German government has targeted the expansion of offshore wind energy to 70 GW, i.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' roughly a tenfold increase in currently installed capacity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=" Onshore, there is already 56 GW of turbine capacity, and an expansion to around 200 GW is expected here, distributed over 2% of the country's surface area." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' However, with 357,000 km2 there is considerably more space than in the exclusive economic zone of the North Sea, which is only 28,600 km2 in size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' So the plans envisage a much more intensive use of wind energy at sea than on land.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' And because each wind turbine draws energy from the atmosphere and thus weakens the winds, the question arises whether, with such a strong expansion, the turbines could take the wind away from each other and thus endanger the high yields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' of 1 10 Translated version Originally published in German - Cite as: Kleidon, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' (2023) "Windenergie in der Deutschen Bucht", Physik in unserer Zeit, 54(1), 30-36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='1002/piuz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='202201654 Wind energy in the German Bight This question was examined in a report by Agora Energiewende on the wind energy potential of the North Sea [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' I worked scientifically on this report and want to present the results here in a comprehensible way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' This study has also already been taken into account in the current, official planning of offshore wind energy in Germany.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' In the following, I will go through the steps necessary to determine the potential for electricity generation by wind energy use in the German Bight.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' In particular, I want to make the effect of wind extraction by the turbines physically plausible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' In the first step, we determined the areas that are potentially available for the expansion of wind energy (Figure 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' There is a whole range of different uses of the sea.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' These include, of course, shipping, which needs routes, certain areas are designated as nature reserves, there are areas used for military purposes, and areas are needed for submarine cables and supply lines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' These areas preclude wind energy use, which significantly reduces the total area available.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' The usable areas can be roughly divided into two areas separated by a wide route for shipping: the coastal area 1 (blue in Figure 1) with 2767 km2 and the far-from-the-coast area 2 (red) with 4473 km2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Next, we need technical information on the turbines that will be placed in these areas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' For this purpose, we choose a hypothetical 12 MW turbine with a rotor diameter of 200 m, which corresponds to the specifications of the currently most powerful turbines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' The power generation of a single turbine is described by the so-called power curve.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' It shows how much electricity an of 2 10 56.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 56.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 54.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 54.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='0 Area 1 Area 2 Denmark Schleswig Holstein The Netherlands Lower Saxony Helgoland North Sea FINO-1 Abbildung 2: Flächen der Deutschen Bucht, die zum Ausbau der Windenergie genutzt werden können.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Die küstennahen Flächen in dunkelblau werden hier als „Gebiet 1“ bezeichnet, während die roten, küstenfernen Flächen als „Gebiet 2“ zusammengefasst werden.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Die Lage der FINO-1 Messstation ist durch den weissen Kreis markiert.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Karte modifiziert nach [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' 3° 4° 5° 6° 7° 8° 9° 56° 55° 54° 3° 4° 5° 6° 7° 8° 9° 55° 54° Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='1 Areas of the German Bight that can be used for the development of wind energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' The areas close to the coast in dark blue are referred to here as area 1, the red areas far from the coast as area 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Black and white circle: position of the FINO-1 measuring station (map modified according to [5]).' 
It shows how much electricity an isolated turbine produces at a prevailing wind speed, the so-called wind yield (Figure 2c). The dependence on the wind speed can be roughly divided into four ranges. In calm conditions below the cut-in velocity of 3 m/s, the turbine produces no electricity. In the second range, up to the rated velocity of 11.5 m/s, the output increases proportionally to the kinetic energy flux density (1/2) ρ v^3, with the air density ρ = 1.2 kg/m3 and the wind speed v in m/s. The energy flux density is then multiplied by the cross-sectional area spanned by the rotor and by the power coefficient of about 0.42 (that is, 42 % of the kinetic energy flux density can be used) to determine the yield in this range. In the third range, above the rated wind speed and up to the cut-out velocity of 28 m/s, the yield is determined by the capacity of the generator. Above this wind speed, the turbine is shut down to protect it against damage and does not generate any electricity.

The average yield of the wind turbine is then determined by combining the power curve with the frequency distribution of wind speeds. For this purpose, we used the frequency distribution of wind speeds (Figure 2a) measured by the FINO-1 measuring station in the German Bight at a height of 100 m in the period 2004-2015. Its position is marked in Figure 1. These data show that the absence of wind is relatively rare, with an average of 5.8 %.
The second range, where the yield depends directly on the wind speed, is the most frequent at 61.5 %, and 32.6 % of the time the turbine operates at its capacity. In only 0.1 % of the time does the turbine have to be shut down due to excessive wind speeds. In total, the turbine generates an average of 6.8 MW of electrical power, or 59.1 GWh of electrical energy per year.

The efficiency of the energy generation can be described, on the one hand, by the full load hours, with which the annual yield is simply the product of this number and the capacity of the turbine. The annual yield is thus calculated as 12 MW × x h/a = 59.1 GWh/a, with x = 4928 full load hours per year.
[Figure 2 (Wind in the German Bight): Wind conditions in the German Bight and their use by an isolated wind turbine. a) The frequency distribution shows wind measurements 2004-2015 at 100 m height on FINO-1 in the North Sea [4]; the position of this measuring station is marked in Figure 1. b) The seasonal course of wind speeds over the months, shown by the median; the area highlighted in blue covers 25-75 % of the distribution. c) Yield of an isolated 12 MW wind turbine as a function of wind speed and d) its seasonal variation in the North Sea. Highlighted in light grey on the left is the range where the wind yield increases with wind speed (61.5 % of the time), while in the range highlighted in dark grey the turbine operates at its capacity limit (32.6 % of the time).]
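As a sketch of this arithmetic, the power curve described above and the full-load-hour bookkeeping can be written down in a few lines of Python. This is an illustrative simplification under the assumptions stated in the text (simple v^3 law capped at the generator capacity), not the manufacturer's actual power curve.

```python
import math

RHO = 1.2                                 # air density near the surface, kg/m^3
CP = 0.42                                 # power coefficient of the turbine
ROTOR_AREA = math.pi * (200 / 2) ** 2     # ~31,415 m^2 for a 200 m rotor diameter
CAPACITY_W = 12e6                         # generator capacity, W
CUT_IN, RATED, CUT_OUT = 3.0, 11.5, 28.0  # m/s

def turbine_power(v):
    """Electrical power (W) of the isolated 12 MW turbine at wind speed v (m/s)."""
    if v < CUT_IN or v > CUT_OUT:
        return 0.0                            # calm, or storm shutdown
    p = 0.5 * RHO * v**3 * ROTOR_AREA * CP    # proportional to v^3 below rated speed
    return min(p, CAPACITY_W)                 # capped by the generator above rated speed

print(turbine_power(11.5) / 1e6)   # ~12 MW: the v^3 law just reaches capacity at 11.5 m/s

# The average yield of 6.8 MW quoted above corresponds to about 59 GWh/a, and
# dividing the annual yield by the 12 MW capacity gives the full load hours:
print(59.1e3 / 12)                 # ~4925 h/a (the article quotes 4928 from unrounded values)
```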
On the other hand, the efficiency can be described by the capacity factor, which is the ratio of the average yield to the capacity of the turbine. In our case, the capacity factor is 6.8 MW / 12 MW = 56.7 %. The efficiency, or capacity factor, is determined not only by the technical specification of the turbine but also by the wind conditions. For example, the capacity factor in Germany on land is only about 20 % [6]. In principle, the yield is also subject to seasonal fluctuations, with higher yields in winter than in summer (Figure 2d).

Next, we considered different scenarios in which the two Areas 1 and 2 are equipped with different numbers of wind turbines. Three scenarios rely solely on the use of Area 1, because its proximity to the coast makes installation, supply, and connection to the power grid less expensive. These scenarios assume installation densities of 5, 10 and 20 MW per square kilometre; with an area of 2767 km2, this corresponds to 1153, 2306, and 4612 turbines of 12 MW capacity each. In five further scenarios, we consider both areas together, with installation densities of 5, 7.5, 10, 12.5, and 20 MW/km2, that is, 3017 to 12067 turbines distributed evenly over the combined 7240 km2. This gives us a total of eight scenarios, covering a range of 14 to 145 GW of installed capacity.
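The scenario bookkeeping is simple arithmetic. The sketch below (my own illustration using only the numbers quoted above, not code from the article) reproduces the turbine counts and installed capacities, and also the annual yield one would get by simply scaling up the single-turbine yield of 59.1 GWh/a, which is the estimate discussed next.

```python
TURBINE_MW = 12
YIELD_PER_TURBINE_TWH = 59.1e-3          # 59.1 GWh/a for the isolated 12 MW turbine
AREA_1, AREA_BOTH = 2767, 2767 + 4473    # km^2 (Area 1, Areas 1 + 2 = 7240 km^2)

scenarios = ([("Area 1", AREA_1, d) for d in (5, 10, 20)] +
             [("Areas 1+2", AREA_BOTH, d) for d in (5, 7.5, 10, 12.5, 20)])

for where, area_km2, density in scenarios:
    capacity_gw = density * area_km2 / 1000               # installed capacity
    n_turbines = round(density * area_km2 / TURBINE_MW)   # number of 12 MW turbines
    scaled_yield_twh = n_turbines * YIELD_PER_TURBINE_TWH
    print(f"{where}: {density} MW/km^2 -> {n_turbines} turbines, "
          f"{capacity_gw:.1f} GW, {scaled_yield_twh:.0f} TWh/a if simply scaled up")

# Area 1 alone: 1153, 2306 and 4612 turbines (about 14, 28 and 55 GW);
# both areas: 3017 to 12067 turbines (about 36 to 145 GW), with simple
# scaled-up yields of roughly 68 to 713 TWh/a.
```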
The German government's expansion target of 70 GW is thus well covered.

Wind yield estimation

Next, we determined the total yield of the installed turbines for the different scenarios. A seemingly obvious way to do so would be to simply multiply the yield of the isolated turbine by the number of turbines. This gives the theoretical yields shown by the light bars in Figure 3. This type of estimation is currently widely used. Sometimes it is reduced by an empirically determined park loss factor of 10 %, but sometimes it is even expected that technological progress will actually increase turbine efficiency. The scenarios then result in a wind yield of 7.8 to 82.1 GW, or 68.2 to 713.6 TWh/a. By comparison, electricity consumption in Germany in 2021 was around 491 TWh/a [3].

However, this way of calculating yields does not take into account that wind turbines extract a considerable amount of kinetic energy from the atmosphere. This weakens the wind and thus the average efficiency of the turbines in the region. We can easily see this by looking at the kinetic energy fluxes of the region (see the box "KEBA: Kinetic Energy Balance of the Atmosphere").
On the one hand, there are two inputs into the lower atmosphere of the region, the so-called boundary layer, which over the North Sea is about 700 m thick: the first contribution comes from the horizontal flow into the region, the second from above through vertical mixing. Area 1 in Figure 1 has an area of 2767 km2; in the following we treat it, simplified, as a square with a side length of about 52.6 km. If we assume a wind speed of 9.4 m/s, which corresponds to the median of the frequency distribution in Figure 2a, we are in the range where the wind yield increases with wind speed (Figure 2c). Thus, about 52.6 × 10^3 m × 7 × 10^2 m × (0.5 × 1.2) kg/m3 × (9.4 m/s)^3 ≈ 18.3 GW flows into the area horizontally, while the vertical replenishment is relatively small at about 2.8 GW (see equations (2) and (3) in the box "KEBA: Kinetic Energy Balance of the Atmosphere"). In total, 21.1 GW of kinetic energy enters Area 1 at this wind speed, which is already quite close to the installed capacity of 14 GW for the smallest scenario for Area 1 with 1153 turbines. So we can see that the wind turbines will extract an appreciable amount of kinetic energy from the region, and their effect must be taken into account.
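These back-of-the-envelope flux numbers can be checked directly. The following lines are an illustrative sketch using only the values quoted above, not code from the article.

```python
RHO, CD = 1.2, 0.001      # air density (kg/m^3) and sea-surface drag coefficient
L = W = 52.6e3            # Area 1 approximated as a 52.6 km square, in metres
H = 700.0                 # boundary layer height over the North Sea, m
V_IN = 9.4                # median wind speed at FINO-1, m/s

flux_density = 0.5 * RHO * V_IN**3        # kinetic energy flux density, ~500 W/m^2
j_in_h = flux_density * W * H             # horizontal inflow through the upwind face
j_in_v = RHO * CD * V_IN**3 * W * L       # vertical input by mixing from above

print(j_in_h / 1e9, j_in_v / 1e9)         # ~18.3 GW and ~2.8 GW, i.e. ~21 GW in total
```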
For estimating the yields of the different scenarios, we can take the balance of kinetic energy fluxes in our virtual box (see the box "KEBA: Kinetic Energy Balance of the Atmosphere" [7] and Figure 4). The estimates from this approach are shown by the blue bars in Figure 3. The orange bars come from calculations with a much more complex numerical weather prediction model. As we can see, the results from both methods are very similar. So looking at the energy fluxes in the atmosphere is the key to understanding the reduced yields from strong wind energy use.

For a complete balance of the kinetic energy fluxes, we also need to look at the loss terms. In addition to the extraction of energy by the turbines, there is the friction loss in the wakes of the turbines, surface friction, and the export of kinetic energy into the areas downwind of the wind farms. The effect of wind extraction can be represented comparatively simply by a reduction factor, since all these components depend on the kinetic energy flux density.
[Figure 3: Electricity yield of different offshore wind energy expansion scenarios in the German Bight without (light) and with (dark) the extraction of wind energy by the turbines, plotted as yield in TWh/a against installed capacity in GW. The blue estimates are based on the KEBA approach (see the box "KEBA: Kinetic Energy Balance of the Atmosphere"), while the orange estimates are based on calculations with a much more complex numerical weather prediction model (WRF). The vertical black line represents Germany's average electricity consumption in 2021. For comparison: the German government's expansion target for offshore wind energy is 30 GW of installed capacity for 2030 and 70 GW for 2050. Data from [5].]

This factor depends primarily on the size of our virtual box and the number of turbines (see equation (10) in the box "KEBA: Kinetic Energy Balance of the Atmosphere"). It reduces the yield especially at low wind speeds, since the yield then depends strongly on the wind speed. At high wind speeds, much more kinetic energy enters our box, since the kinetic energy fluxes depend on the third power of the wind speed. In this case, the turbines operate at their capacity, which means that lowering the wind speed does not affect their yield as much.
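A small sketch separates these two regimes, using the reduction factor of 0.38 that is derived for the 72 GW scenario in the KEBA box below; again this is my own illustration of the argument, not code from the article.

```python
f_red = 0.38                      # reduction factor of the 72 GW scenario (see the KEBA box below)
v_rated = 11.5                    # rated wind speed of the 12 MW turbine, m/s

# Wind-limited regime: the yield scales with v^3, i.e. directly with f_red.
print(1 - f_red)                  # ~0.62 -> 62 % less yield at low wind speeds

# Capacity-limited regime: the turbine still delivers 12 MW as long as the
# weakened wind f_red**(1/3) * v_in stays above the rated speed.
v_in_needed = v_rated / f_red ** (1 / 3)
print(round(v_in_needed, 1))      # ~15.9 m/s: above this inflow speed the yield is unaffected
```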
Figure 5 shows an example of how the various contributions to the kinetic energy balance change across the scenarios. The natural case without wind energy use is also included. Here, the input of kinetic energy is balanced by surface friction and downwind export. The more wind energy is used in the areas, the more the terms shift towards electricity generation (yellow in Figure 5) and frictional losses in the wakes (orange). This comes at the expense of surface friction and export. These two terms are directly coupled to the wind speed, so the wind speed must decrease. This can clearly be seen in the frequency distribution of wind speeds (Figure 6), which shifts towards lower values with greater use.

[Figure 4: Kinetic energy balance of the atmosphere over the region in which wind energy is used, and from which an effective wind speed can be calculated. The schematic shows the horizontal flux of kinetic energy into the box Jin,h, the vertical transport Jin,v, the boundary layer height H, the width W and length L of the box, surface friction Dfric, electricity generation Gturb and wake dissipation Dwake, and the horizontal outflow Jout,h. See the box "KEBA: Kinetic Energy Balance of the Atmosphere" for an explanation of the symbols and how this calculation is done.]

KEBA: Kinetic Energy Balance of the Atmosphere

The effect of wind extraction by turbines can be described quite simply and physically with the help of the kinetic energy balance of the lower atmosphere [7]. For this purpose, we consider the air volume above the area of the planned wind farms (Figure 4), with width W and length L as well as a height H.
This height comprises the boundary layer, in which the lower atmosphere is well mixed; over the North Sea it is usually around 700 m high. We now consider the components that contribute, export, or convert kinetic energy in this volume. These are the kinetic energy inputs from upwind areas and from above, Jin,h (dark blue arrow in the figure) and Jin,v (light blue arrow), the export Jout,h downwind (purple arrow), the frictional loss due to surface friction Dfric (red arrow), the extraction by the turbines for power generation Gturb (yellow arrow), and the frictional losses in the wakes due to the mixing with surrounding air masses Dwake (orange arrow):

Jin,h + Jin,v = Jout,h + Dfric + Gturb + Dwake .   (1)

The horizontal input of kinetic energy is described by

Jin,h = [(ρ/2) vin^3] × W × H .   (2)

The expression in brackets is the kinetic energy flux density, with the air density ρ of about 1.2 kg/m3 near the sea surface and the upwind wind speed vin. The input of kinetic energy from the free atmosphere above the boundary layer by vertical mixing can be described through the friction loss at the surface in the absence of wind turbines, because then these two terms balance:

Jin,v = ρ Cd vin^3 × W × L ,   (3)

where Cd is the drag coefficient, which is typically about 0.001 over the sea. If wind turbines are present, we describe the wind speed within the volume by an effective speed v. It will be lower than vin, because the wind turbines change the kinetic energy balance of the volume. We use this effective wind speed to describe the other four terms of the balance.
We write the export of kinetic energy into downwind areas analogously to (2) as

Jout,h = [(ρ/2) v^3] × W × H .   (4)

For the friction loss we write, similarly to (3),

Dfric = ρ Cd v^3 × W × L .   (5)

The power generation, or yield, of the wind turbines in the range where the power depends on the wind speed is given by

Gturb = [(ρ/2) v^3] × η × Arotor × N ,   (6)

where η is the power coefficient of the turbine, typically η ≈ 0.42, Arotor is the cross-sectional area spanned by the rotor blades (31415 m2 in the case of our 12 MW turbine), and N is the number of wind turbines. For the friction loss in the turbine wakes, we assume 50 % of the power extracted from the wind by the turbines as a realistic value, so that

Dwake = 0.5 × Gturb .   (7)

The four terms on the right-hand side of the kinetic energy balance (1) all depend on v^3, so we can easily obtain the effective wind speed by rearranging the equation. It can be written as

v = fred^(1/3) × vin ,   (8)

and the amount of electricity generated as

Gturb = fred × [(ρ/2) vin^3] × η × Arotor × N .   (9)

Here, fred is a reduction factor describing the effect of wind extraction from the volume:

fred = (H + 2 Cd L) / (H + 2 Cd L + (3/2) η Arotor (N − 1) / W) .   (10)

Note that for an isolated turbine (N = 1) this factor is 1, so there is no yield reduction. The higher the number of turbines and the larger the rotor area, the smaller the factor becomes: the wind is weakened and the yield is reduced. For the case where the turbines operate at their capacity, a similar expression can be derived.
The application to the 72 GW scenario is briefly illustrated here: with H = 700 m, W = L ≈ 85 090 m, Cd = 0.001, η = 0.42, and N = 6033, this results in a factor of fred = 870 m / (870 m + 1404 m) = 0.38. When the yield depends on the wind speed, this factor implies that wind extraction causes the yield to drop to 38 %, a 62 % reduction, while the wind speed has only dropped by 28 %. However, this is only a partial aspect of the overall yield, as there are still times when the turbines operate at their capacity. Therefore, the reduction in Figure 3 is less dramatic, at 40 %. The various components of the kinetic energy balance can then be determined by combining the observed energy flux density (505 W/m2 in the median, at vin = 9.4 m/s) with these parameters and equations. These KEBA calculations are available as a spreadsheet for yield estimates on the Internet [8].
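The reduction factor and the worked example for the 72 GW scenario can be reproduced in a few lines of Python. This is an illustrative sketch of equations (8) and (10), not the published KEBA spreadsheet [8].

```python
def keba_reduction_factor(H, L, W, Cd, eta, A_rotor, N):
    """Reduction factor f_red of equation (10) for N turbines in an L x W x H box."""
    numerator = H + 2 * Cd * L
    return numerator / (numerator + 1.5 * eta * A_rotor * (N - 1) / W)

# 72 GW scenario: both areas, 10 MW/km^2, treated as a square box of side ~85 km
f_red = keba_reduction_factor(H=700.0, L=85_090.0, W=85_090.0,
                              Cd=0.001, eta=0.42, A_rotor=31_415.0, N=6033)

print(round(f_red, 2))                 # ~0.38: wind-limited yields drop to 38 %
print(round(1 - f_red ** (1 / 3), 2))  # ~0.27: roughly the 28 % wind speed drop quoted above
```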
[Figure 5: Components of the kinetic energy balance for the scenarios of Figure 3; the upper, light blue section again applies to Area 1 alone, the lower, white section to Areas 1 and 2 together. The values are estimated with the KEBA approach (colouring as in Figure 4; explanation of the symbols in the legend and in the box "KEBA: Kinetic Energy Balance of the Atmosphere").]

Conclusions

Overall, this gives us a differentiated picture of the contribution that offshore wind energy can make to the energy transition. On the one hand, the potential to generate electricity is huge, even with the associated, significant reductions due to wind extraction by the turbines. For example, the 72 GW scenario can cover more than a third of Germany's current electricity consumption. On the other hand, the use is much more efficient if wind farms are less dense and distributed over larger areas. This can be seen in a direct comparison of the scenarios with 55 GW installed in Area 1 and 54 GW installed in both areas (Figures 3 and 5): in the latter case, the reduction effect is much smaller, as the turbines are distributed over a much larger area. This weakening effect will therefore play an increasingly important role in the expansion of offshore wind energy. It is independent of the technology, the size of the turbines, or the positioning of the turbines within the wind farm. After all, the main effect has to do with what the turbines are there to do: to extract energy from the wind in order to generate electricity with it.
[Figure 6: Frequency distribution of wind speeds for three scenarios, illustrating the shift to lower wind speeds with more wind energy use. The numerical values indicate the median of the respective distributions.]

Summary

A significant contribution to the energy transition is expected from offshore wind energy in the German Bight. Due to the strong and steady winds, offshore electricity generation appears to be very efficient. For 2050, the German government assumes an installed capacity of 70 gigawatts, a tenfold increase compared to today. But what happens when so many wind turbines draw their energy from the wind?
This can easily be determined with the help of the kinetic energy balance of the atmosphere above the wind farms. Since the input of kinetic energy is limited, the more wind energy is used, the lower the wind speeds in the region must become, and with them the efficiency of the turbines. So less electricity is generated than would be expected without this effect. At 70 GW, that would reduce electricity generation by as much as 40 %. Still, it could meet a large part of the current electricity demand. For the efficient use of wind energy at sea, it is therefore advisable to plan wind farms as widely dispersed as possible in order to reduce their influence on the wind fields.

Keywords

Wind energy, offshore, energy transition, renewable energy, full load hours, kinetic energy, wind speed, power rating, capacity factor, kinetic energy balance of the atmosphere (KEBA).

The author

Axel Kleidon studied physics and meteorology at the University of Hamburg and Purdue University, Indiana, USA. He received his doctorate from the Max Planck Institute for Meteorology in 1998 for his work on the influence of deep-rooted vegetation on the climate system. He subsequently conducted research at Stanford University in California and at the University of Maryland. Since 2006, he has headed the independent research group "Biospheric Theory and Modelling" at the Max Planck Institute for Biogeochemistry in Jena. His research interests range from the thermodynamics of the Earth system to the natural limits of renewable energy sources.

Address

Dr. Axel Kleidon, Max Planck Institute for Biogeochemistry, Postfach 10 01 64, 07701 Jena.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' axel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='kleidon@bgc-jena.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='mpg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='de Literature [1] https://ag-energiebilanzen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='de/daten-und-fakten/primaerenergieverbrauch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' [2] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Kleidon, Physik in unserer Zeit 2019, 50(3), 120.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' [3] https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='energy-charts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='info/downloads/Stromerzeugung_2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' [4] http://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='fino1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='de.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' [5] https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='agora-energiewende.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='de/en/publications/making-the-most-of-offshore-wind.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' [6] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Germer, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Kleidon, PLoS ONE 2019, 14(2), e0211028.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' [7] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Kleidon, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Miller, Geosci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' Model Dev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=', 2020, 13, 4993.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' [8] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=" Kleidon, The Kinetic Energy Budget of the lower Atmosphere (KEBA) Model: Files from the project 'Making the most of offshore wind', commissioned by Agora Energiewende and Agora Verkehrswende, 2020, https://doi." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='org/ 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='17617/3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content='3h.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} +page_content=' of 10 10' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GNAzT4oBgHgl3EQfHPvT/content/2301.01043v1.pdf'} diff --git a/GNE0T4oBgHgl3EQfhQF8/content/2301.02429v1.pdf b/GNE0T4oBgHgl3EQfhQF8/content/2301.02429v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..267933f8c575e49c21109c2479f428a80fc040eb --- /dev/null +++ b/GNE0T4oBgHgl3EQfhQF8/content/2301.02429v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39907e5785a38da2ae3013576fb7f2afa7720d643474c50301894d739e833eba +size 6200898 diff --git a/HNA0T4oBgHgl3EQfBv_D/content/2301.01981v1.pdf b/HNA0T4oBgHgl3EQfBv_D/content/2301.01981v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2727b247e271a54334e394a9bedd824f79788efd --- /dev/null +++ b/HNA0T4oBgHgl3EQfBv_D/content/2301.01981v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce1a8ea7702cb27bceb763e548465eaee633ea4f74ab8033e0eb75dd7bb83da9 +size 147147 diff --git a/HNA0T4oBgHgl3EQfBv_D/vector_store/index.faiss b/HNA0T4oBgHgl3EQfBv_D/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..6456d1c4e3a3dbb9ec9beee01bd4254f39b3dc0f --- /dev/null +++ b/HNA0T4oBgHgl3EQfBv_D/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fa8a6f43d4cecc024a5440e813e31ea9ffab283d2f16965628f14d61c648377 +size 458797 diff --git a/HNA0T4oBgHgl3EQfBv_D/vector_store/index.pkl b/HNA0T4oBgHgl3EQfBv_D/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..76e11a2177feebfe5eba7166e0c59f83ce337af3 --- /dev/null +++ b/HNA0T4oBgHgl3EQfBv_D/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46cdab3f042c790980611323bb9523c73b220793e158a2a1f3c7e71c21552bb6 +size 22588 diff --git a/HtAyT4oBgHgl3EQfrvl-/vector_store/index.pkl b/HtAyT4oBgHgl3EQfrvl-/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..eb40f03c5c0ae3dcc4992fd2f3f864fd61f7baf7 
--- /dev/null +++ b/HtAyT4oBgHgl3EQfrvl-/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e0e4d2e0b64b088b02e53af22bb09150b64db0d5c9b1d4fb8611a2cca05bd3b +size 319987 diff --git a/JdAyT4oBgHgl3EQfsPlo/vector_store/index.faiss b/JdAyT4oBgHgl3EQfsPlo/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..56a7bd2f13bb244fc814edf0d86f94a38e250140 --- /dev/null +++ b/JdAyT4oBgHgl3EQfsPlo/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1560de9f9e420c0b40dc0eb650433053484f1d45e5759b826feea1c961323254 +size 3211309 diff --git a/JdE1T4oBgHgl3EQfsAXy/content/tmp_files/2301.03362v1.pdf.txt b/JdE1T4oBgHgl3EQfsAXy/content/tmp_files/2301.03362v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..15967bfb6782a52baa5286903d07473c6c1a7481 --- /dev/null +++ b/JdE1T4oBgHgl3EQfsAXy/content/tmp_files/2301.03362v1.pdf.txt @@ -0,0 +1,3282 @@ +Image Denoising: The Deep Learning Revolution and Beyond +– A Survey Paper – +Michael Elad, Bahjat Kawar and Gregory Vaksman +The Computer Science Department, Technion – Israel Institute of Technology +email: {elad,bahjat.kawar,grishavak}@cs.technion.ac.il +Abstract. +Image denoising – removal of additive white Gaussian noise from an image – is one of the oldest and most +studied problems in image processing. An extensive work over several decades has led to thousands of papers on +this subject, and to many well-performing algorithms for this task. Indeed, ten years ago, these achievements +have led some researchers to suspect that “Denoising is Dead”, in the sense that all that can be achieved in this +domain has already been obtained. However, this turned out to be far from the truth, with the penetration of +deep learning (DL) into the realm of image processing. The era of DL brought a revolution to image denoising, +both by taking the lead in today’s ability for noise suppression in images, and by broadening the scope of +denoising problems being treated. Our paper starts by describing this evolution, highlighting in particular the +tension and synergy that exist between classical approaches and modern Artificial Intelligence (AI) alternatives +in design of image denoisers. +The recent transitions in the field of image denoising go far beyond the ability to design better denoisers. +In the second part of this paper we focus on recently discovered abilities and prospects of image denoisers. We +expose the possibility of using image denoisers for service of other problems, such as regularizing general inverse +problems and serving as the prime engine in diffusion-based image synthesis. We also unveil the (strange?) +idea that denoising and other inverse problems might not have a unique solution, as common algorithms would +have us believe. Instead, we describe constructive ways to produce randomized and diverse high perceptual +quality results for inverse problems, all fueled by the progress that DL brought to image denoising. +This is a survey paper, and its prime goal is to provide a broad view of the history of the field of image +denoising and closely related topics in image processing. Our aim is to give a better context to recent discoveries, +and to the influence of the AI revolution in our domain. +Key words. 
Image denoising, Inverse problems, MMSE Estimation, Plug and Play Prior (PnP), Regularization +by Denoising (RED), Langevin Dynamics, Diffusion Models, Image Synthesis, Perceptual Quality, +Perception-Distortion Trade-off. +1. Introduction. Within the wide fields of image processing and computational imaging, +the task of image denoising has been given an exceptionally large attention over the past +several decades. Indeed, noise suppression in images is one of the oldest and most studied +problems in these fields, with numerous papers offering diverse algorithms, analysis of this +task in various forms, or extensions of it.1 A substantial portion of the proposed denoising +techniques has been dedicated to the removal of Additive White Gaussian Noise (AWGN) +from images, while there are other contributions that target different noise distributions, e.g. +Poisson, salt-and-pepper, and more. +1See Figure 2.1 for the quantities of denoising related papers over the years. +1 +arXiv:2301.03362v1 [eess.IV] 9 Jan 2023 + +2 +M. ELAD, B. KAWAR AND G. VAKSMAN +Removal of noise from an image is an actual necessity that comes up with practically every +imaging sensor [191]. However, the interest in this problem goes far beyond this practical +need – image denoising is the simplest inverse problem, and as such, it has been recognized +over the years as the perfect test-bed for assessing new ideas that are often brought to image +processing. In recent years this appeal has further widened with the realization that denoisers +can serve other imaging needs [295, 231, 260]. +The years 1980 – 2010 have seen consistently improving denoising algorithms, many of which +relying on the Bayesian point of view. This progress has been geared by an evolution of image +priors that form the backbone of the overall progress in image processing. This path, which we +will refer to as the classical era, started with the early L2-based regularization, proceeding to +robust statistics, moving to the introduction of wavelets, and the later deployment of partial +differential equations to imaging tasks, and this continued all the way to sparse modeling, +patch-based methods, and low-rank structure assumptions2. This extensive work over several +decades has led to many well-performing denoising algorithms, and to a compelling and rich +scientific field. In fact, ten years ago, these glorious achievements have led some researchers +to consider the possibility that “Denoising is Dead”, believing that the existing solutions are +already touching the achievable performance ceiling [45, 162, 163]. +The past decade has brought a paradigm shift to the general topic of data processing due to +the emergence of the Artificial Intelligence (AI) revolution. The great success of this deep +learning (DL) trend has also introduced a reformation to the broad field of image processing, +and to image denoising in particular. These new winds led to novel techniques for designing +better performing denoisers [50, 330, 167, 332, 270, 159, 8, 339, 165], and discovering new and +more daring ways for deploying them and broadening their scope [1, 170, 323, 288, 102, 166, +146, 212, 113]. These days, deep-learning based denoisers are at the very top in their ability +for noise suppression in images (see e.g. [330, 165, 322], leaving no competitive room for the +classical alternatives). 
+In parallel to the above and seemingly detached from the deep learning activity, image de- +noising has been also a topic of investigation and discoveries of a different flavor: Harness- +ing denoiser engines for other imaging tasks. This started with the surprising idea that a +good performing denoiser can serve as a prior, offering a highly effective regularization to +inverse problems [295, 231, 28, 139, 283, 268, 192, 280, 49, 55]. +This continued with the +discovery that such denoisers can also be used for randomly synthesizing images by offer- +ing a practical sampling from the prior distribution of images, this way posing a potent +competition to Generative Adversarial Networks (GANs) and other image generation meth- +ods [260, 261, 262, 120, 287, 68, 122, 143, 121]. +An intriguing sequel to the above synthesis revelation is the idea that solution of inverse +problems could be revisited and posed as a sampling task from the posterior distribution of +the image given the measurements, thus resorting again to image denoisers as the means for +obtaining these solutions. This very recent line of work unveiled the daring idea that denoising +and other inverse problems might not have a unique solution, as common algorithms would +2As referencing this is too long, we provide specific citations to each of these in later sections. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +3 +have us believe [212, 146, 138, 145, 211]. Instead, this sampling approach has been shown to +lead to constructive ways for producing randomized and diverse high perceptual quality results +for inverse problems, exposing as a byproduct the inner uncertainty in such tasks. +All the above achievements have been strongly influenced and fueled by the progress that +DL brought to image denoising. Adopting a wider perspective, image denoising these days +has new horizons, and if any conclusion can be drawn from these recent accomplishments, it +would be that this field is a very much alive playground with great challenges and prospects. +This paper aims to disclose and detail the compelling story drawn above. Our prime goal is +to provide a broad view of the history of the field of image denoising and closely related topics +in image processing, give a better context to recent discoveries, and highlight the influence of +the AI revolution in our domain. +We start our journey in Section 2 by clearly defining the image denoising task, discussing its ill- +posed nature, and demonstrating its appeal over the years. We proceed in Sections 3 and 4 by +describing the evolution of image denoisers, from the classical era to the deep-learning-based +alternatives. Section 5 highlights the tension and the possible synergy that exists between +classical approaches and modern Artificial Intelligence (AI) alternatives in design of image +denoisers. +In the second part of the paper we change gears and move to discuss three recent discoveries +that consider image denoisers as building blocks for other needs. We start broadly in Section +6 by defining the denoiser engine and its properties, and set the stage for the presentation +of these three discoveries. We proceed in Section 7 by discussing the ability to deploy these +engines for regularizing inverse problems. Section 8 exposes the possibility of synthesizing +images using such denoisers, and Section 9 presents the notion of targeting perfect perceptual +quality outcomes in image denoising and inverse problems by sampling from the posterior +distribution. 
We conclude this paper in Section 10 with an attempt to point to open questions and potential research directions.

Disclaimer: While this paper aims to present a survey on the various ups and downs that the field of image denoising has gone through over the years, it would be simply impossible to do justice to all the published literature in this domain. We apologize if some papers are omitted from our references, as we attempt to mark the critical milestones in the history of this field. The interested reader is referred to [156, 197, 130, 18, 92, 281] for other surveys with different orientations.

2. Image Denoising – Background.

2.1. Problem Definition. Our story must start with a proper definition of the denoising problem, and this will also serve the need for defining our notations hereafter. An ideal image³ x ∈ R^N is assumed to be drawn from the image manifold, represented by the probability density function p(x). Our measurement is the vector y ∈ R^N, given by

y = x + v,   (2.1)

where v ∈ R^N is a zero-mean independent and identically distributed (i.i.d.) Gaussian noise, i.e. v ∼ N(0, σ²I). The denoising task is the recovery of x from y with the knowledge of σ, and a denoiser is thus a function of the form x̂ = D(y, σ).

³For simplicity of the discussion, assume that we refer to grayscale images. Addressing color is discussed shortly in Section 4.

While there are many ways of assessing the performance of such denoisers, the most common one is the Mean-Squared-Error (MSE) measure,

MSE = E[ ∥x − x̂∥₂² ] = E[ ∥x − D(y, σ)∥₂² ],   (2.2)

where the expectation is taken over the image distribution. A well-known result in estimation theory states that the best denoising with respect to this measure (i.e., achieving the Minimum MSE, thus referred to as MMSE) is given by [217]

x̂_MMSE = E(x | y).   (2.3)

This formula is misleadingly simple in its concise form, as designing a denoiser that achieves the MMSE is quite challenging and oftentimes simply impossible. By the way, the curious reader may wonder why we are emphasizing the MSE measure and the MMSE denoiser. The answer will be carefully unfolded in the later parts of the paper, where these choices play a critical role. A brief note about this appears later in this section.

How hard is it to denoise an image? How complicated could it be? Again, the simplicity of the problem definition is illusory, as this task is in fact hard and ill-posed. One could easily design a filtering method for attenuating and suppressing the noise in y, but such a process is very likely to ruin the image content as well, losing small details, sacrificing edges, damaging fine textures, and more.

2.2. The Gaussianity Assumption. In the problem definition above we focused on a very specific case of a zero-mean i.i.d. Gaussian noise contamination. The natural question is why we restrict the discussion to this case. A brief inspection of the literature on image denoising reveals that this noise model is very popular, covered by most of the developed algorithms. Where does this popularity come from? Several answers come to mind:

• Central Limit Theorem: Noise in imaging may arise due to many physical reasons, and their accumulation often leads to a Gaussian distribution of the form discussed above, as an empirical manifestation of the Central Limit Theorem [277, 127].
As +such, rather than modelling the intricate noise origins, a Gaussian assumption offers +a blessed simplification for the later analysis and algorithm development in this field. +• The Poisson Alternative: One might rightfully argue that the proper distribution +to address for imaging noise would be the Poisson one, as imaging sensors essentially +count photons, and their arrival is of Poissonian nature [59]. While this argument +is indeed correct, when photon counts are high, the Poisson distribution becomes a +Gaussian one [26]. If the counts are low, a variance stabilizing transform, such as +Anscombe [7], can turn these measurements into additive Gaussian contaminated, +again resorting to the Gaussianity regime [81, 325, 233, 184, 12, 272, 334]. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +5 +• Mathematical Elegance: The Gaussian case is easily modeled, and consequent +formulations become simple and elegant. +Such is the case with the log-likelihood +function p(y|x) and other related derivations that will be shown in subsequent sections. +• MMSE Denoiser Engines: Our last argument for the Gaussianity assumption is +quite surprising and unfamiliar to many in our field. +As it turns out, an MMSE +denoiser for the removal of zero-mean i.i.d. +Gaussian noise is of great theoretical +importance. Such an engine has critical properties that enable its deployment as a +prior (see Section 7) for inverse problems. In addition, and perhaps more importantly, +such denoisers have strong theoretical ties to the score function [260], a fact that will +be highlighted and exploited in Sections 8-9.2. +2.3. Extensions of Image Denoising. There are many variations to the core image de- +noising task mentioned above. These can be roughly divided into four sub-categories: (i) +Handling different noise statistics; (ii) Addressing structured noise removal; (iii) Consider- +ing different and various visual content; and (iv) Posing different problem assumptions and +settings. Lets us briefly describe each of these. +A natural extension of the original denoising problem posed above is to consider other noise +statistics, such as Poisson (also referred to as shot-noise) denoising [93, 248, 100, 65, 325, 291, +233, 184, 226, 12], salt-and-pepper noise removal [40, 264, 75, 300, 210], treating mixtures of +Poisson and Gaussian noise [176, 326, 184], and more. Other extensions consider structured +noise, such as quantization noise in compression artifact removal [206, 176, 297, 63, 106], +film-grain removal [312, 311, 62], and textured or otherwise colored noise [108, 193, 3, 219]. +Another challenging task is noise removal in scenarios in which the noise is not spatially +homogeneous, such as white noise with spatially varying σ [187, 332, 344, 148]. The inpainting +problem [90, 182, 306, 134] can be regarded as a special such case, where portions of the +image are simply missing and need to be revived. These missing pixels can be regarded as +contaminated by a very strong noise, while other regions of the image are reliably measured. +The denoising task may assume a different setting altogether if the visual content is of different +form. Such an example is noise reduction in bursts of snapshots [173, 102, 198, 189, 86, 166, 80], +where several images are treated jointly. Somewhat similar yet different is the task of video +denoising [179, 9, 10, 275, 276, 290, 259, 180, 322, 161], in which we may seek online filtering +of the incoming frames. 
When handling specific imaging types (e.g., microscopy [223, 25, 13, 103, 340, 186, 158, 188], CT [164, 48, 318, 316, 70, 314] and PET/SPECT imaging [51, 82, 105, 227, 343, 266], and more), the algorithm design may require adequate adaptations to the data format (e.g., treating 3D volumes [294, 336, 324, 64, 178]) or to the way it is captured.

The last category of extensions has to do with our prior knowledge when addressing denoising tasks. Blind denoising [131, 169, 157, 47, 201, 256, 342, 321, 114] refers to the case in which the noise is known to be i.i.d. Gaussian, but σ is unknown, and may even be spatially changing. A more complex situation is when the noise statistics are totally unknown [345, 8, 2, 307]. In this context, a special case of great interest in recent years is removal of true noise from given images captured by digital cameras (e.g., cellphones) [298, 149, 170, 285, 301, 128].

2.4. The Interest in Image Denoising. Figure 2.1 presents a graph showing the number of papers that have been published each year on the topic of image denoising. Overall, nearly 30,000 such papers are identified by Clarivate Web-Of-Science (WoS), published mostly in the past 25 years. As such, this is clearly one of the most heavily studied topics in image processing, and perhaps in exact sciences in general. Also evident from this graph is the consistent growth of this topic over the years. Where does this popularity come from?

Figure 2.1: The number of papers on the image denoising topic over the years. This graph corresponds to the search topic=((image or video ) and (denoising or (noise and remov) or clean)) performed on December 1st 2022 in Clarivate Web-of-Science (WoS). Note that the lower count in 2022 does not stand for a new trend, but is rather caused by a delayed reporting of new papers.

A prime reason to study image denoising is its practical relevance to imaging systems. Removal of noise from acquired visual information is an actual necessity that comes up with practically every imaging sensor [191]. Thus, various algorithms have been developed for implementation in image processing software packages and within the ISP (Image Signal Processor) – the path that starts with the raw acquired data and ends with a high quality image – of every digital camera [23, 30, 298, 338, 124].

Beyond the obvious practical motivation described above, the interest in image denoising has other, more profound, roots. Image denoising is the simplest inverse problem, and as such, it has been recognized over the years as the perfect platform for assessing new ideas that are often brought to image processing. Indeed, all the major milestone advancements in image processing started with denoising experiments, so as to explore their validity for visual data. This was the case with Tikhonov-Arsenin's regularization theory [282, 104], Wavelets [185], non-linear filtering based on partial differential equations [302, 111], sparse modeling of data [31, 88], and more. All these and many other sub-fields in imaging sciences saw image denoising as a critical first step in their diffusion into broad image processing tasks. We discuss these in greater detail in the next section.
The above two reasons for the popularity of image denoising may account for many of the published papers in this domain. However, the reason we have chosen to write this paper has to do with a third, and very different, origin of popularity. Image denoising has gained much interest and appeal in recent years due to the surprising realization that denoisers can serve other imaging needs, thus widening their scope and influence [295, 231, 260, 120, 145]. This discovery relies on a fundamental theoretical connection between denoisers and the prior distribution of images [200, 265, 84]. This bridge provides a solid and well-motivated approach to old and new tasks in image processing. In fact, this is the topic we shall be highlighting in the latter sections of our paper, and we thus defer a more detailed explanation of these ideas.

3. Image Denoising – The Classic Era. So far we have discussed image denoisers without concretely diving into the actual quest of their construction. So, how can we design an image denoiser? Not so surprisingly, the answer to this question has evolved and changed over the years, with the accumulated knowledge and the progress in signal and image processing. Still, we may broadly separate this progress in the design of image denoisers into two eras – the classical one that started in the 1970s and ended in the past decade, and the AI revolution era that started around 2012 and is very much alive to this day. In this section we focus on the classical algorithms, and more specifically on the Bayesian point of view that played a key role in their creation.

3.1. The Bayesian Point of View for Design of Denoisers. Starting with Equation (2.1), given the noisy image y and knowing that v ∼ N(0, σ²I), our goal is to estimate x. A simple approach towards this task would be Maximum-Likelihood Estimation (MLE) [205, 235], seeking x̂ that maximizes the conditional probability p(y|x), essentially maximizing the likelihood of the given measurements y. Due to the Gaussianity of the noise, this probability is easily given by

p(y|x) = const · exp( −∥x − y∥₂² / (2σ²) ),   (3.1)

and maximizing it amounts to the trivial and fruitless solution x̂_MLE = y. This outcome is a direct manifestation of the ill-posedness of the denoising problem, exposing the need for more information for its solution. This is where the Bayesian point of view enters: instead of the likelihood alone, we consider the posterior probability of x given y, obtained via Bayes' rule,

p(x|y) = p(y|x) · p(x) / p(y) = const · exp( −∥x − y∥₂² / (2σ²) ) · p(x).   (3.2)

In the last equality we have absorbed the denominator p(y) into the constant, as it is independent of x. While this expression is a simple modification of the MLE (multiplying the likelihood by the prior p(x)), it is in fact a significant change, as it regularizes the inversion process from y to x.

Two commonly used estimators that exploit p(x|y) are the MAP and the MMSE. The first is obtained by maximizing this posterior, leading to the Maximum A-Posteriori Probability (MAP) estimation [205, 235], given by⁴

x̂_MAP = arg min_x { ∥x − y∥₂² / (2σ²) − log(p(x)) }.   (3.3)

⁴This minimization is obtained by taking the −log of the above expression.

As opposed to the MLE, x̂_MAP is dictated by two forces: the first pulls it towards y, while the other seeks a "well-behaved" result that leads to a low value of −log(p(x)) – this is exactly the regularization mentioned above.
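To make the MAP formulation of Equation (3.3) tangible, the following toy sketch runs plain gradient descent on that objective with a simple hand-crafted smoothness prior, taking −log p(x) = λ(∥D_h x∥₂² + ∥D_v x∥₂²) up to a constant (one of the early priors listed in Table 3.1 below). This is an illustrative example only – not any particular published algorithm – and the value of λ and the iteration count are arbitrary choices.

    import numpy as np

    def smoothness_grad(x):
        # Gradient of ||D_h x||_2^2 + ||D_v x||_2^2 with periodic boundaries,
        # i.e. 2 * (D_h^T D_h + D_v^T D_v) x  (minus twice the discrete Laplacian of x).
        dh = np.roll(x, -1, axis=1) - x
        dv = np.roll(x, -1, axis=0) - x
        return 2.0 * ((np.roll(dh, 1, axis=1) - dh) + (np.roll(dv, 1, axis=0) - dv))

    def map_denoise(y, sigma, lam=0.05, n_iters=300):
        """Gradient descent on  ||x - y||^2 / (2 sigma^2) + lam * smoothness(x)  (Eq. 3.3)."""
        step = 1.0 / (1.0 / sigma ** 2 + 16.0 * lam)   # a safe (1 / Lipschitz) step size
        x = y.copy()
        for _ in range(n_iters):
            grad = (x - y) / sigma ** 2 + lam * smoothness_grad(x)
            x = x - step * grad
        return x

Swapping in one of the later priors of Table 3.1 (total variation, wavelet sparsity, and so on) mainly changes the prior term and the optimization tools it requires, which is precisely why this Bayesian template proved so flexible.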
Similarly, the MMSE estimator [132] also relies on the posterior probability and, as already shown in Equation (2.3), is given by⁵

x̂_MMSE = E(x | y) = ∫ x · p(x|y) dx.   (3.4)

⁵See Appendix A for a derivation of this statement.

While this expression is very concise and clear, operating with it has proven to be quite challenging due to the need for the partition function – the normalizing factor of this distribution. This explains the vast popularity of the MAP-based approach among the classical methods.

Be it the MAP or the MMSE, the Bayesian point of view requires access to p(x) or proxies of it. This brings us to the next discussion on the evolution of priors in image processing and their impact on the design of denoisers.

3.2. Evolution of Priors. A key player in image processing is the prior, p(x), the probability density function of the image distribution. Modeling p(x) and using it for problems in visual data processing have served as the skeleton of our field, and defined its trajectory over the years. Below we outline the central milestones in the evolution of modeling p(x).

One critical theme to remember is the fact that the expression −log(p(x)), which appears in the popular MAP estimation (see Equation (3.3)), should assume a closed-form expression so as to lend itself to a manageable numerical optimization. For this reason, most attempts to characterize p(x) have chosen to use the Gibbs distribution form [132], p(x) = c · exp{−ρ(x)}, shifting our focus from p(x) to the energy function ρ(x).

So, what should ρ(x) be to properly describe the image distribution? In order to keep this discussion concise, we present in Table 3.1 a brief list of possible analytical expressions for this function, without diving into their meaning, inter-relations, and effect. A more detailed explanation of these expressions is provided in Appendix B. Please bear in mind that this naive approach of choosing an expression for ρ(x) is nothing short of a fantastic feat – can we really expect a simple formula to grasp the richness of the image content distribution?

Table 3.1: Evolution of priors for images.
Years      | Core concept                | Formulae for ρ(·)
~1970      | Energy regularization       | ∥x∥₂²
1975-1985  | Spatial smoothness          | ∥Lx∥₂² or ∥D_v x∥₂² + ∥D_h x∥₂²
1980-1985  | Optimally Learned Transform | ∥Tx∥₂² = xᵀR⁻¹x (via PCA)
1980-1990  | Weighted smoothness         | ∥Lx∥²_W
1990-2000  | Robust statistics           | 1ᵀμ{Lx}, e.g., Huber-Markov
1992-2005  | Total-Variation             | ∫_{v∈Ω} |∇x(v)| dv = 1ᵀ√(|D_v x|² + |D_h x|²)
1987-2005  | Other PDE-based options     | ∫_{v∈Ω} g(∇x(v), ∇²x(v)) dv
2005-2009  | Field-of-Experts            | ∑_k λ_k 1ᵀ μ_k{L_k x}
1993-2005  | Wavelet sparsity            | ∥Wx∥₁
2000-2010  | Self-similarity             | ∑_k ∑_{j∈Ω(k)} d{R_k x, R_j x}
2002-2012  | Sparsity methods            | ∥α∥₀ s.t. x = Dα
2010-2017  | Low-Rank assumption         | ∑_k ∥X_{Ω(k)}∥_*

The evolution of the ideas in Table 3.1 is characterized by several major and interconnected trends – the migration from the familiar Gaussian distribution to the less intuitive heavy-tailed ones, the departure from L2 to sparsity-promoting measures such as the L1, the drift from linear approximation techniques (e.g., PCA) to non-linear ones (e.g., wavelets and sparse modeling), and above all, the replacement of axiomatic expressions with learned priors.

3.3. Other Classical Denoisers. While the above-described Bayesian approach has proven to be quite productive, yielding a wide variety of denoising methods, alternative and more direct design techniques for such algorithms were also considered.
Here we mention few such +methods, some relying on the general notion of spatially adaptive smoothing of image content, +while others leverage self-similarity that often-times exists in images. +Consider the following rough motivating idea: Recall that a denoiser should attenuate ran- +dom i.i.d. Gaussian noise while preserving the image content. When operating on a noisy +pixel y[i, j], our intuitive strategy is to open a neighborhood around it, Ω[i, j], for averaging +purposes. If it so happens that the local image content in Ω[i, j] behaves like a tilted plane, a +simple averaging of these neighborhood pixels would provide a perfect local noise suppression. +When the local behavior deviates from this simple structure, the averaging mask should take +this into account and adapt accordingly. +This is exactly the idea behind the Bilateral filter [284, 87] and the Beltrami-Flow [255], in +which the averaging weight takes into account two forces – (i) the proximity of the weighted +pixel to the center of the neighborhood; and (ii) the proximity of this pixel’s value to the center +pixel’s value, indicating its relevance to the averaging. Computing these weights for each pixel +y[i, j] and normalizing them to sum to one creates the local averaging kernel to apply. This +way, if Ω[i, j] covers an edge between two regions, averaging will be restricted to the “relevant” +pixels while discarding others. Non-Local-Means [32] takes this approach one step further by +widening Ω[i, j] to a semi-local region, and by assessing pixels’ relevance to the averaging by +patch-matching instead of scalar value comparisons. This way we keep the spatially adaptive +averaging concept, but robustify it and make it non-local. Kernel-regression [271] is also a +spatially adaptive averaging technique, but one that relies on a local parametric estimation of + +10 +M. ELAD, B. KAWAR AND G. VAKSMAN +the pixels’ gray-values in Ω[i, j]. A 2D Gaussian is fitted to the pixels in Ω[i, j], dictating its +orientation and span, and this way offering a smoothing along edges instead of across ones. +Another direct image denoising method that deserves our specific attention, especially due +to its superior performance, is the BM3D algorithm [61]. This technique relies on the expec- +tation that 2D-DCT transformed local patches in natural images are expected to be sparse. +Furthermore, by gathering groups of similar patches from the overall image area, this trans- +formed sparsity should align in support. Thus, BM3D builds a 3D cube of similar patches for +each pixel y[i, j], transforms this cube together and forces a joint sparsity outcome. Among +the classical denoising algorithms, BM3D is considered among the very best approaches in +terms of MSE results. In this context, we also mention the Weighted Nuclear Norm Minimiza- +tion (WNNM) denoising method [110] and its followups (e.g. [310]). These rely on a similar +rationale to the BM3D, but replace the joint sparsity by a low-rank assumption. +3.4. Is denoising dead?. The paper “Is Denoising Dead?”, published in 2009 by Chat- +terjee and Milanfar [45], exposed a disturbing feeling shared by many in our community at +the time – a suspicion that we are touching the ceiling in terms of denoising ability. This +impression relied on the considerable progress in design of denoising algorithms during the +preceding years, and the fact that very different approaches towards this problem were found +to lead to comparable denoising performance. 
+A followup work [162, 163] by Levin and +Nadler in 2011-2012 addressed the same question. Both lines of work suggested a derivation +of an approximated lower-bound of the MSE for noise removal ability. Without diving into +the specifics of their derivations, we should mention that both concluded that there is still +room for some improvement, even though this claim was not made constructively, leaving the +question of how to obtain better techniques vague at best. +From a practical point of view, and despite these optimistic conclusions, the progress in +denoising performance after 2010-2011 was very slow and of diminishing returns. Indeed, the +graph in Figure 2.1 shows a decrease in the number of papers on image denoising around 2010. +However, this setback held true mostly for classically oriented methods of the kind discussed +above. The emergence of deep neural networks brought a massive change to our domain, +shattering the common belief about the end of this field, and the folklore around the attained +performance limit. +Indeed, deep learning brought new ways for the design of highly effective image denoisers, +taking the lead in today’s ability for noise suppression in images. However, the AI revolution +had a much wider impact on the image denoising task, opening new horizons to possibilities +and abilities never dealt with before. Among many such directions, these include (i) image +adaptation; (ii) true noise removal; and (iii) addressing new denoising objectives. +In the +following section we discuss all these with much greater details. +While the past decade can certainly be titled as the era of AI revolution, there has been +another revolution, perhaps of a bigger scale, that took place in parallel in the field of image +processing – one that refers to the discovery that an image denoiser can serve other tasks. From +the seminal paper on the Plug-and-Play Priors [295], through Regularization by Denoising +paper [231], and all the way to the recent and exciting diffusion-based image synthesis [260, + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +11 +120], image denoisers are taking a new and much more exciting role in image processing. As +this is the main theme of this paper, We shall expand on this line of work in Section 6 and +after. +So, to summarize, for the question ‘is denoising dead?’ our answer is ‘definitely not!’, and this +is due to the vast influence of deep learning, and other new directions that brought new life +to this domain. The rest of this paper is dedicated to the description of these developments +and their impact and prospects. +4. Image Denoising – The Deep Learning Revolution. The recently discovered ability +to effectively train deep neural networks for classification, regression and other tasks should +not be taken lightly. Nothing in this process is well-understood or well-justified. Indeed, the +opposite is true – with overparametrized networks and a highly non-convex objective function, +it is quite surprising that such networks are able to learn and generalize at all. And yet they +do! This is the essence of the AI revolution that has found its way to so many fields, impacting +each in a profound way. +Image processing and computational imaging is yet another playground that has been deeply +influenced by this AI revolution. Today’s practice and theory in image processing is entirely +different from the ones considered only 10 years ago. 
Indeed, image processing undergraduate and graduate courses had to change dramatically due to these new winds of change.

And all this brings us to the new era of image denoising. In Section 3 we asked how image denoisers should be designed, and gave an answer that relies on the classical Bayesian approach. We now return to this question and provide an entirely different answer – one that builds on supervised deep learning. This approach takes the following steps:

1. Start by gathering a large⁶ dataset of clean images of diverse content – the kind we aim to denoise. We shall denote this set as X = {x_k}_{k=1}^{M}. For simplicity, assume that all images are of the same size. If this is not the case, an easy process of random tile extraction can convert the given data to this desired structure.

2. Recall that our goal is the design of a denoiser that removes additive white Gaussian noise of a specific strength σ. Thus, the next step is to create noisy instances of X, i.e., Y = {y_k}_{k=1}^{M}, where for 1 ≤ k ≤ M, y_k = x_k + v_k and v_k ∼ N(0, σ²I). In fact, every example x_k could be contaminated by several noise realizations, thereby enriching the training set.

3. Define a parametric denoising architecture x̂ = D_Θ(y, σ) that should be trained to perform the denoising task. This stage is necessarily vague, as there are many options for constructing such an architecture, and there seem to be no clear guidelines for its structure. Indeed, the literature offers various such options, conceived by trial and error. More details and a discussion of this delicate stage are given below.

4. Define the training loss – a penalty function that exploits the availability of X and Y and the defined parametric denoiser D_Θ(y, σ), posing a cost value to be minimized with respect to Θ, encouraging the denoised images to be close to their corresponding ideal ones. Such a functional could be of the form

L(Θ) = ∑_{k=1}^{M} dist(x_k, x̂_k) = ∑_{k=1}^{M} dist(x_k, D_Θ(y_k, σ)),   (4.1)

where dist(x, x̂) is a distance function between the ideal and the denoised image, such as the MSE, dist(x, x̂) = ∥x − x̂∥₂².

5. Minimize L(Θ) with respect to Θ via stochastic gradient descent [24] applied on small batches of training pairs (x_k, y_k), exploiting back-propagation [242].

⁶By 'large' we mean thousands and sometimes millions of images, and the more the better. Oftentimes, the training itself may rely on several hundred images, and these are augmented by randomized operations such as crop, scale-down, rotations, and more.

Once all the above steps are completed, the denoiser x̂ = D_Θ(y, σ) is ready to be deployed on newly incoming images, expected to perform better or worse in noise removal depending on the size and quality of the training set, the similarity between the image to be denoised and the training set, the chosen architecture, and the quality and hyperparameters of the optimization process.

A variant of the above is blind denoising, in which σ is unknown. The straightforward approach towards this task is brute-force learning. This means that for every ideal image x we produce a sequence of noisy versions y_k^σ with varying values of σ in the range we aim to cover. Learning is then done by minimizing a loss that integrates over all the noise levels,

L(Θ) = ∑_σ ∑_{k=1}^{M} dist(x_k, D_Θ(y_k^σ)).   (4.2)

Observe that in this case the denoiser D_Θ gets only the noisy image, without σ.
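To make the above recipe concrete, here is a minimal PyTorch-style sketch of steps 2-5, covering both the fixed-σ loss of Equation (4.1) and the brute-force blind variant of Equation (4.2). The denoiser "model" and the data loader "clean_loader" are hypothetical stand-ins for whatever architecture and dataset one chooses, and the noise range and hyperparameters are arbitrary illustrative values.

    import torch

    def train_denoiser(model, clean_loader, sigma=25.0 / 255.0, epochs=10, lr=1e-3, blind=False):
        """Supervised MSE training of a denoiser x_hat = model(y), following steps 2-5 above."""
        opt = torch.optim.Adam(model.parameters(), lr=lr)
        for _ in range(epochs):
            for x in clean_loader:                       # a batch of clean images in [0, 1]
                s = sigma
                if blind:                                # blind variant: draw sigma per batch (Eq. 4.2)
                    s = float(torch.empty(1).uniform_(0.0, 50.0 / 255.0))
                y = x + s * torch.randn_like(x)          # synthesize the noisy counterpart (step 2)
                loss = torch.mean((model(y) - x) ** 2)   # MSE training loss (Eq. 4.1)
                opt.zero_grad()
                loss.backward()                          # back-propagation (step 5)
                opt.step()
        return model

In the fixed-σ case one would typically also feed σ to the model, as in x̂ = D_Θ(y, σ); the sketch omits this input for brevity.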
An interesting alternative to this brute-force blind training was discovered in [201], showing that a bias-free architecture becomes robust to the noise power, so that simple training for a single value of σ generalizes well to other noise levels.

An amazing consequence of all the above is this: all the glorious work on image priors that fueled the design of classical denoisers and other tools in image processing seems to have become totally obsolete. Observe that in this supervised deep learning approach we have no need nor room for all the knowledge and know-how that have been accumulated carefully over decades of extensive research and engineering work. Is this a fair description of the current state of things in our field? To a large extent, the sad answer is positive, while some reservations to this conclusive statement will be discussed in Section 5.

The emergence of deep learning techniques and their new abilities brought a new evolution of ideas on the design and span of image denoisers. While this literature is vast and rich, we describe below several key trends in this progress, in an attempt to expose both the new abilities obtained and the new ideas accompanying them. These come on several fronts:

• Better Denoisers: Improving image denoising capabilities via deep learning became a natural new front, where the aim is to perform better in terms of Peak-Signal-to-Noise-Ratio (PSNR) on an agreed-upon corpus of test images. This is manifested by an evolution of architectures that started with simple feed-forward Convolutional Neural Networks (CNN) [330], proceeded to more advanced structures, such as the UNet [234, 115], and arrived at the recently introduced Transformers [78, 165, 322]. In Figure 4.1 we illustrate this trend by presenting a graph that shows the progress in denoising PSNR on the well-known BSD68 dataset [190]. More details on each of these algorithms are given in Appendix C.

[Figure 4.1 plots PSNR (roughly 27.5-29.5 dB) versus publication year (2006-2020) for the methods listed in the caption.]

Figure 4.1: Denoising performance on the BSD68 dataset [190] with σ = 25 (K-SVD [89], BM3D [61], FoE [237], LSSC [181], EPLL [347], MLP [33], CSF [252], WNNM [110], TNRD [50], DnCNN [330], IRCNN [331], NLRN [167], MVCNN [168], N3Net [218], FFDNet [332], FOCNet [133], RIDNet [8], GCDN [292], SwinIR [165], DRUNet [329]).

• Different Training Schemes: We described above the most obvious, supervised, training strategy, in which we gather pairs of ideal images and their noisy versions. Various unsupervised alternatives have also been developed for this task, such as Noise2Noise [160], Noise2Void [152], Noise2Self [16], SURE-based denoising [337, 175, 207], and others, all aiming to operate on noisy images directly, without access to their clean versions. It should be clear, though, that these techniques become relevant only in cases where the noise does not follow a known analytic structure, as otherwise the supervised alternative would be preferred. Another appealing approach that adopts unsupervised denoiser training is the "Deep Image Prior" (DIP) [286], where a network is trained on a single image to best fit itself. An early stopping of this learning is shown to yield an effective denoising, revealing the regularization capabilities of the UNet architecture.
+• True Noise Removal: We mentioned above the Noise2X line of work [160, 152, +16], which enables denoising of images without access to their clean versions. This + +14 +M. ELAD, B. KAWAR AND G. VAKSMAN +ability becomes crucial when operating on images with un-modeled and unknown noise +statistics. In such cases, learning should rely on more fundamental forces, such as self- +similarity in images, the slow tendency of regressed neural networks to recreate noise +from noise, the joint information that exists in burst of frames, and more. More broadly +speaking, removal of true noise from images is a relatively new topic in image denoising, +as it has hardly been addressed in the classical era due to its evident complexity. +With advanced self-supervised and unsupervised learning techniques, new impressive +abilities were created [298, 149, 170, 285, 301, 128]. +• Image adaptation: This refers to the ability to take an already designed/trained +denoiser and adapt it to perform better on unique images that deviate from the training +set. This way, general purpose denoisers could be boosted when operating on scanned +documents, astronomical images, cartoon images and more. +The adaptation itself +could be done in various ways, the most natural of these is the following [289]: Given +a noisy yet unique image to be cleaned, apply first the available denoiser DΘ0 and +obtain ˆx0 = DΘ0(y, σ). Now retrain the denoiser (i.e. update the parameters Θ) by +minimizing dist (ˆx0, DΘ(y, σ)). Similar to the core idea behind Noise2Noise [160] and +DIP [286], few gradient steps of this minimization are expected to go in the proper +direction and yield a more informative and relevant denoiser, thus boosting the result +for this specific image. The final outcome is obtained by ˆx = DΘ(y, σ), using the +slightly updated parameters Θ. +• Addressing Different Objectives: When describing the supervised learning strat- +egy of denoisers, we offered the L2 loss that considers PSNR performance. Over the +years this quality measure took the lead in most papers, despite its known weaknesses. +Indeed, our community has been constantly striving to get the MMSE denoiser, if not +in body, then at least in spirit, and this is evident from the PSNR performance tables +that appear in almost every paper on image denoising published over the years. As +we argue later on in Section 9.1, while MMSE denoisers are of great value by them- +selves, their outcome is not necessarily visually appealing, being an average over many +potential solutions. +Bearing this in mind, the learning paradigm creates a new opportunity for serving +“new masters” – recall that the learning loss function is highly non-convex, and yet we +have no fear of its complexity when training the neural networks. Thus, we can easily +replace the pleasant L2 by more sophisticated or adequate penalties. The immediate +alternative that comes to mind is SSIM [299], which offers a more robust distance +measure between images by considering structural similarity. +We could go further +and consider perceptual losses such as LPIPS [335], that is further robustified by a +learned representation in order to fairly assess proximity between images. This trend +can be characterized as an attempt to produce visually pleasing and crisp images from +the denoisers, ones that will surpass the MMSE alternative. A step forward in this +direction takes us to Generative Adversarial Networks (GANs) for denoising [69, 67, +212]. 
The idea is to challenge the output of the denoiser, by feeding it into a classifer +that should tell apart true images versus denoised ones. By leveraging this classifier’s + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +15 +guidance, the denoiser can learn to produce better looking images. +We will come +back to this idea in Section 9.1, offering an improved approach that targets perfect +perceptual quality results. +The description given above provides nothing but a glimpse into a very vibrant and rich body +of literature that finds image denoising as an appealing playground for research. Still, we stop +the survey of deep learning for denoising here, as our prime goal is the denoisers themselves +and algorithms building on top of them. +As one final note, observe that all the preceding discussion on classical and modern denoisers’ +design is given without referring to color images. Indeed, the formulation in this paper con- +siders a grayscale image x, yet most denoisers, old and new, are typically required to process +color (Red-Green-Blue) images. Some of the existing methods discussed above are easily ex- +tended to color by operating on the three chroma channels jointly. For example, NLM [32] and +K-SVD denoising [89] operate on RGB patches directly by flattening them to longer vectors. +Another approach is to turn to the YUV or YCbCr color-space, and operate on the luma +(Y) and the chroma (Cb/Cr or U/V) layers independently, as BM3D does [61]. Denoisers +based on deep neural networks typically handle color directly by feeding the RGB image as a +3-dimensional tensor input to the network, processed by subsequent 3D convolutions. More +intricate approaches do exist, in which the geometrical interplay between the color layers is +taken into account more adequately [255]. +5. Synergy between Classics and Deep Learning. With the description given above, the +reader may (rightfully!) get the impression that the vast knowledge regarding image denoising +gathered during the classical era has become obsolete with the emergence of the deep learning +alternatives. However, this claim is not entirely correct. In reality, the themes investigated +and promoted by classical algorithms are serving as the foundations for building DL denoisers, +even if practiced implicitly, and these are mostly manifested by the choice of architectures to +be used. To illustrate this, we mention several well-known key concepts of classical image +denoising algorithms, and show their impact on DL architectures: +• Locality: Most information relevant to restoring a pixel’s value in denoising is con- +tained in its local neighborhood. In classical algorithms, this concept is embodied using +patch processing, local filtering, local image priors, and more. When it comes to DL +schemes, many denoisers choose convolutional layers as their primary processing path, +which leads to architectures with small to moderate receptive fields [308, 330, 332]. +• Sparsity under appropriate transforms: Local image patches are expected to be +sparse when represented using certain 2D transforms. On the classical side, several +of the priors listed in Table 3.1 fall into the sparsity-promoting category. On the DL +side, a similar treatment can be observed, where the commonly used ReLU activation +promotes sparsity by nulling the negatively activated neurons [101]. +• Self-similarity: Most image patches have similar twins at other locations in the same +image. 
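These inherited concepts can be read directly off typical DL denoisers. As a minimal illustration (in PyTorch, and not a reproduction of any specific published model), the following residual CNN realizes locality through small 3x3 convolutions and sparsity through ReLU activations; the bias-free, residual design follows the spirit of [330, 201], while self-similarity would require adding attention-style modules as in [165, 317]. The layer counts and widths are arbitrary choices.

    import torch.nn as nn

    class ConvDenoiser(nn.Module):
        """A small residual CNN denoiser: locality via 3x3 convolutions, sparsity via ReLU.
        Illustrative sketch only - not an exact reproduction of any published architecture."""
        def __init__(self, channels=1, features=64, depth=8):
            super().__init__()
            layers = [nn.Conv2d(channels, features, 3, padding=1, bias=False), nn.ReLU(inplace=True)]
            for _ in range(depth - 2):
                layers += [nn.Conv2d(features, features, 3, padding=1, bias=False), nn.ReLU(inplace=True)]
            layers += [nn.Conv2d(features, channels, 3, padding=1, bias=False)]
            self.body = nn.Sequential(*layers)

        def forward(self, y):
            return y - self.body(y)   # predict the noise residual and subtract it

Such a model can be trained, for instance, with the supervised loop sketched in Section 4.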
While classical algorithms usually harness this property by gathering similar +patches and processing them jointly, some recent DL schemes leverage self-similarity +using self-attention layers [165, 317]. + +16 +M. ELAD, B. KAWAR AND G. VAKSMAN +Unfortunately, these and other concepts inherited from the classical era do not provide a +constructive answer to the main question DL faces: How should we choose the appropriate +architecture for the denoising task? Researchers facing this question are usually taking one +of the two following options: (i) Copy: adoption of an existing architecture that has been +demonstrated to lead to good results in a similar task (e.g., DnCNN, UNet, ResNet, and +more) [330, 234, 117]. Usually such an adoption is accompanied by some minor modifications +such as changing the number of channels or layers, etc.; or (ii) Trial and error: gathering +an architecture by piling a mix of known building blocks such as convolutions, strides, batch +normalization steps, ReLU, fully-connected layers, down- and up-scaling, skip-connections, +and more. +Both these options seem to work rather well, leading to networks achieving very good practical +results – see [330, 332, 165]. However, this brute-force approach typically tends to end up with +heavy and cumbersome architectures, relying on millions of trainable parameters, making the +resulting networks expensive to use and hard to train. Another downside in such architec- +tures is their lack of explainability. While this may seem unimportant, having a black-box +denoiser with no explainability implies an inability to leverage it to other tasks (e.g., image +separation [199, 91, 153]), or probe it for identifying origins of failures for ill-treated regions in +the image. More broadly, the brute-force approach towards architecture design for denoisers +may require a lengthy trial and error process and may end up hitting a performance barrier. +An alternative to copying or guessing architectures does appear in recent literature, known as +unfolding [109, 341, 250, 83, 204]. This approach constructs the neural network so as to mimic +the computational stages of a well motivated algorithm. The term unfolding has to do with +the fact that many classical image denoising methods involve iterative algorithms, and thus +networks mimicking these should unfold their iterations to a feed-forward computational path. +This approach typically produces concise and perfectly explainable networks, both in terms +of the learned parameters and the activations obtained, which are easier to train. In addition, +such networks tend to be easily and effectively adapted to different data. There are various +examples in the literature for the unfolding approach for various regression tasks, e.g. [313, 58, +328, 125, 289, 250, 204]. Here we briefly describe two such methods for illustrative purpose: +Deep K-SVD [250] and LIDIA [289]. +Both propose a conversion of a classical denoising +algorithm into a deep neural network architecture. +5.1. Deep K-SVD. Deep K-SVD [250] is an unfolding version of the K-SVD image de- +noising algorithm [89]. We start with a brief explanation of the original K-SVD method, and +then turn to describe its unfolding. +K-SVD denoising is based on sparse representation theory for constructing the image prior [4]. +Consider a clean image x and patch extraction operators {Rk}k such that Rkx ∈ Rn are image +patches of size √n × √n taken from location k in the image. 
The sparsity-promoting prior assumes that any such patch, Rkx, can be represented as a linear combination of few columns (also referred to as atoms) from a redundant dictionary D ∈ Rn×p (redundancy implied by p > n), i.e.,

(5.1)   R_k x = D \alpha_k ,

where αk ∈ Rp is a sparse vector, ∥αk∥0 ≪ n.

Figure 5.1: End-to-end architecture of the Deep K-SVD network [250], composed of a Patch Decomposition block, a patch-wise Sparse Coding denoiser (with a λ evaluation sub-network), Patch Reconstruction, and a final Patch Averaging block.

Armed with this assumption, K-SVD poses the following minimization problem:

(5.2)   \min_{\{\alpha_k\}_k,\, x} \; \frac{\mu}{2}\|x - y\|_2^2 \;+\; \sum_k \Big( \lambda_k \|\alpha_k\|_0 + \frac{1}{2}\|D\alpha_k - R_k x\|_2^2 \Big),

where y is the given noisy image, and µ and λk are hyper-parameters. In this expression, the first term is the log-likelihood, requiring proximity between the reconstructed image x and the noisy image y. The second and third terms represent the sparse representation prior, demanding that every image patch Rkx in every location k has an approximate sparse representation αk.

The K-SVD algorithm solves this minimization problem by applying the following two steps iteratively: (i) Fix x (initialized by x = y) and update the vectors {αk}k; and (ii) Update x while freezing the sparse representation vectors. The first is referred to as the sparse coding stage, where each patch in the current solution obtains a sparse representation via the Orthogonal Matching Pursuit (OMP) greedy algorithm [214]. The second step becomes a quadratic minimization task, its solution being a simple variation of patch-based averaging. A single round of the above two steps has been shown to suffice for getting very good results [89], and repeating this round several times can further boost the results [347]. The dictionary D in the above process can be either universal – pretrained to best sparsify natural image content – or image adaptive – updated to the image y itself within the above optimization.

We now turn to describe the Deep K-SVD algorithm, which adopts the universal dictionary approach. The end-to-end architecture referring to a single round is illustrated in Figure 5.1. This neural network consists of three main blocks: Patch Decomposition, Patch Denoiser, and Patch Averaging, all following closely the very same steps described above, with appropriate adaptations. Patch Decomposition breaks the input image y into a set of fully overlapped patches {zk}k = {Rky}k. The next block, Patch Denoiser, is applied patch-wise, but replaces the OMP by LISTA [109], in which zk undergoes sparse coding via a differentiable shrinkage-based iterative algorithm [109]. These inner iterations are unfolded as well to create a feed-forward computational path that starts with zk and ends with ẑk = Dαk. Due to the gap between OMP and LISTA, a sub-network of fully-connected layers computes the value of λk for the incoming patch zk. The last block, Patch Averaging, rebuilds the reconstructed image x̂ by averaging the cleaned patches ẑk using learned weights.
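To give a feel for the unfolded patch denoiser, here is a minimal PyTorch sketch of a LISTA-style sparse coding block of the kind used at the heart of Deep K-SVD. It is a simplification under our own assumptions: a single learned soft threshold stands in for the λk-evaluation sub-network, and the dictionary size and iteration count are arbitrary. It is not the exact architecture of [250].

import torch
import torch.nn as nn

class LISTAPatchDenoiser(nn.Module):
    # Unfolded ISTA ("LISTA") sparse coding of an n-pixel patch over a learned
    # dictionary D (n x p), followed by reconstruction z_hat = D @ alpha.
    def __init__(self, n=64, p=256, n_iters=10):
        super().__init__()
        self.D = nn.Parameter(0.1 * torch.randn(n, p))    # learned dictionary
        self.step = nn.Parameter(torch.tensor(0.1))       # learned gradient step size
        self.theta = nn.Parameter(torch.tensor(0.05))     # learned soft threshold (stand-in for lambda_k)
        self.n_iters = n_iters

    def forward(self, z):                                  # z: (batch, n) noisy patches
        alpha = torch.zeros(z.shape[0], self.D.shape[1], device=z.device)
        for _ in range(self.n_iters):                      # unfolded iterations
            resid = alpha @ self.D.t() - z                 # D*alpha - z
            alpha = alpha - self.step * (resid @ self.D)   # gradient step on 0.5*||D a - z||^2
            alpha = torch.sign(alpha) * torch.relu(alpha.abs() - self.theta)  # soft shrinkage
        return alpha @ self.D.t()                          # cleaned patches z_hat = D*alpha

model = LISTAPatchDenoiser()
noisy_patches = torch.randn(128, 64)          # a batch of 8x8 noisy patches, flattened
clean_estimates = model(noisy_patches)        # (128, 64) denoised patches

Training such a module end-to-end, together with the decomposition and averaging stages, under the MSE loss discussed next is what turns the classical recipe into a learned denoiser.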
+This Deep K-SVD network is trained end-to-end by minimizing the MSE distance between +the ideal and denoised images for a set of M training images, +(5.3) +L (Θ) = +M +� +k=1 +∥xk − ˆxk∥2 +2 = +M +� +k=1 +∥xk − DΘ (yk)∥2 +2 , +where {xk, yk}k is a set of clean and noisy image pairs to train on. +DΘ is the denoising +network, where Θ stands for all trainable parameters, consisting of the dictionary D, the +parameters of the sub-network that evaluates λk and the shrinkage thresholds. +Despite the close resemblance between the original algorithm and its unfolded version, the +later performs much better [250], surpassing classical methods and aligning with deep-learning +based techniques. This should not come as a surprise as the unfolded denoiser DΘ is trained +in a supervised manner, being fully aware of the task it serves, whereas the original algorithm +relied on a “guessed” image prior. Interestingly, the universal dictionary obtained for DΘ +is markedly different from the one trained off-line for the original K-SVD denoising method, +again a testimony to the major difference between the two design strategies. +5.2. LIDIA - Lightweight Learned Image Denoising. Another example of unfolding- +based denoising is LIDIA [289], which mimics the computational stages of the BM3D [61]. As +already mentioned in Section 3.3, BM3D harnesses two prime forces for its denoising goal – +sparsity and self similarity. The first relies on the assumption that local image patches are +sparse under the 2D-DCT spatial transform; the later is reflected by operating on groups of +similar patches jointly, forcing sparsity again by transforming across these patches. +Patch +Decomposition +Patch +Combining +y +MN + + + +z +y +n +k +k +k  += R +ˆx +MN + +Noisy image +M +N + +Reconstructed image +M +N + +Nearest +Neighbor +Search + + +n m +k +k + + +Z +Filtering +Network +  +ˆz +n +k +k  +Figure 5.2: The LIDIA denoising computational path. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +19 +T1ZkT2 +ReLU +T3ZkT4 +n m +k + + +Z +ˆ +s t +k + + +Z +Figure 5.3: The Transform-ReLU-Transform block. Applying the matrices T1 and T2 trans- +forms the input, Zk, to a space in which patches are supposed to be sparse; the matrices T3 +and T4 transform the outcome to the pixel domain. Observe that the transform applied on Zk +is separable – T1 is applied within patches while T2 operates across. This enables a reduction +of the size of the matrices T1, . . . , T4 in order to enable their training. +LIDIA’s core computational path is shown schematically in Figure 5.2. This neural network +starts by breaking the input image y into a set of fully overlapping patches {zk}k of size +√n × √n. +Then, each patch, zk ∈ Rn, is augmented with a group of its m − 1 nearest +neighbors, forming a matrix Zk of size n × m. +The filtering is applied patch-wise – each +matrix, Zk, undergoes a series of blocks composed of a separable transform, ReLU, and +another separable transform, as shown schematically in Figure 5.3. This mimics the BM3D +operation by transforming the input matrix to a space in which local patches are believed to +be sparse, forcing sparsity using the ReLU layer, and transforming back the outcome to the +pixel domain. Unlike BM3D, the transforms are trainable and are not restricted to be the +inverse of each other, nor forced to be square matrices. In addition, LIDIA includes a multi- +scale treatment, simultaneously processing patches in several scales. 
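As a side note, the Transform-ReLU-Transform block of Figure 5.3 is compact enough to write out; the PyTorch sketch below is our own illustrative rendering (matrix sizes and initialization are assumptions), not the exact LIDIA implementation.

import torch
import torch.nn as nn

class TransformReLUTransform(nn.Module):
    # Separable Transform-ReLU-Transform block: T1 Z T2 -> ReLU -> T3 (.) T4.
    # T1/T3 act within patches (rows), T2/T4 act across the m grouped patches (columns).
    def __init__(self, n=64, m=16):
        super().__init__()
        self.T1 = nn.Parameter(torch.randn(n, n) / n ** 0.5)
        self.T2 = nn.Parameter(torch.randn(m, m) / m ** 0.5)
        self.T3 = nn.Parameter(torch.randn(n, n) / n ** 0.5)
        self.T4 = nn.Parameter(torch.randn(m, m) / m ** 0.5)

    def forward(self, Z):                        # Z: (batch, n, m), a patch and its m-1 neighbors
        S = torch.relu(self.T1 @ Z @ self.T2)    # transform to a domain where patches should be sparse
        return self.T3 @ S @ self.T4             # transform back towards the pixel domain

block = TransformReLUTransform()
Z = torch.randn(32, 64, 16)                      # 32 groups of 16 patches, 64 pixels each
Z_hat = block(Z)                                 # same shape as Z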
During processing, the +corresponding patches from different scales are fused using a learned joint transform. Finally, +the reconstructed image is obtained by returning the denoised patches to their original places +while averaging overlaps using learned weights. +The LIDIA network is trained end-to-end (excluding the nearest-neighbor part) by minimizing +the MSE loss, similar to the loss in Equation 5.3, applied on a set of M training images. The +network can be trained for a specific noise level σ or blindly, aiming to serve a range of σ +values. LIDIA performs much better than the original BM3D algorithm since it uses learned +rather than fixed transforms. Compared with other deep-learning techniques LIDIA achieves +comparable results, while using a small fraction of the typical number of learned parameters. +In Section 4 we mentioned the ability to adapt a given denoiser to newly coming images that +deviate from the training set. This adaptation starts by applying the trained denoiser, and +then uses the output in order to fine-tune the denoiser parameters by applying few gradient +steps. This rationale has been successfully demonstrated with LIDIA, and two such illustrative +results are brought in Figure 5.4. +5.3. Summary - The classics is still here. We described two unfolding instances in which +classic denoising algorithms provide their architecture for the learned network. These and +other such methods [341, 250, 83, 204], targeting various image recovery tasks, offer a con- +structive path towards well-motivated, low complexity and explainable neural architectures. +In the quest for a synergy between classical denoising methods and novel deep-learning alter- + +20 +M. ELAD, B. KAWAR AND G. VAKSMAN +(a) Clean +(b) Noisy, σ = 50 +(c) Denoised, 24.22dB +(d) Adapted, 26.34dB +(e) Clean +(f) Noisy, σ = 50 +(g) Denoised, 22.10dB +(h) Adapted, 25.82dB +Figure 5.4: Image adaptation via LIDIA: The original denoising network is trained for general +content images, and performs reasonably well for astronomy and scanned document inputs. +A substantial boost in denoising performance is obtained for these two examples, due to their +deviation from the training set. +natives, this is probably the most natural manifestation of it. +6. Image Denoising – Migration towards Recent Discoveries. The clear conclusions +from the above discussion are these: Highly effective image denoisers for AWGN removal, +D(y, σ), are definitely within reach, and the better ones are likely to be deep-learning based +algorithms. In an attempt to illustrate these statements, Figures 6.1 and 6.2 present denoising +results for two test images, two noise levels (σ = 15, 50) and by several denoisers – NLM [32], +BM3D [61], DnCNN [330], and SwinIR (a transformer-based denoising network) [165]. As +can be seen, the results are very impressive and more so by the later deep neural network +solutions. +We now turn to ask far more daring questions with regard to such denoisers, focusing this +time on their deployment to other tasks. More specifically, we discuss three such questions, +each corresponding to a recent discovery in the field of imaging sciences: +• Discovery 1: Can we leverage a denoiser D(y, σ) for solving general linear inverse +problems? As we shall shortly see, the answer to this question is positive and construc- +tive, opening new horizons for design of recovery algorithms and their regularization. 
• Discovery 2: Can we leverage a denoiser D(y, σ) for synthesizing (hallucinating) high-quality images, fairly drawn from the prior probability density function p(x)? Here again the answer is positive and constructive, offering a thrilling new line of activity in machine learning.

• Discovery 3: If hallucination of perfect-looking images is achievable, can we revisit the topic of general linear inverse problems and leverage a denoiser D(y, σ) for their solution while targeting perfect perceptual quality results? Here again we give a positive answer, leading to a new and inspiring branch of research in inverse problems and offering a novel view of their treatment.

Figure 6.1: Demonstration (1) of several denoising methods on a test image with noise level σ = 50: (a) Clean, (b) Noisy, (c) NLM [32] 24.67dB, (d) BM3D [61] 26.31dB, (e) DnCNN [330] 26.70dB, (f) SwinIR [165] 27.31dB.

Below we discuss each of these discoveries in greater detail. It is our sincere belief that these together form one of the most exciting eras for our field, marking a major transition in how image processing is perceived and practiced.

7. Discovery 1: Solving Inverse Problems via Image Denoisers. Given a denoiser D(y, σ) : RN → RN, our goal is to use it somehow for solving general linear inverse problems of the form

(7.1)   y = Hx + v,

where H ∈ RM×N is a known matrix, v ∈ RM is AWGN, and y ∈ RM is the given measurement vector. Observe that H = I stands for the denoising problem. Therefore, the current discussion extends our view to a wider family of tasks in imaging sciences, covering applications such as deblurring, inpainting, demosaicing, super-resolution, tomographic reconstruction, compressed sensing, and more.

Figure 6.2: Demonstration (2) of several denoising methods on a test image with noise level σ = 15: (a) Clean, (b) Noisy, (c) NLM [32] 33.82dB, (d) BM3D [61] 36.23dB, (e) DnCNN [330] 36.33dB, (f) SwinIR [165] 37.17dB.

Following the derivations in Section 3 and specifically Equation (3.3), we can adopt the Bayesian point of view and obtain the MAP estimation for this family of problems:

(7.2)   \hat{x}_{MAP} = \arg\min_x \left\{ \frac{\|Hx - y\|_2^2}{2\sigma^2} - \log p(x) \right\}.

Plugging in the Gibbs distribution form for the prior, p(x) ∼ exp{−ρ(x)}, this becomes

(7.3)   \hat{x}_{MAP} = \arg\min_x \left\{ \|Hx - y\|_2^2 + c \cdot \rho(x) \right\}.

Clearly, the greatest riddle posed above has to do with the identity of the energy function ρ(x). Can a denoiser serve all linear inverse problems in a unified approach by providing a connection or an alternative to ρ(x)? Surprisingly, the answer to this question is positive and constructive. The seminal Plug-and-Play Prior (PnP) work by Venkatakrishnan, Bouman and Wohlberg [295] was the first to provide such an answer7, followed and improved upon by RED (Regularization by Denoising) [231]. These and their various extensions and variations have created a vivid and stimulating sub-field of research in imaging sciences [28, 139, 283, 34, 268, 41, 192, 280, 5, 49, 55] in which denoisers play a central role. Below we describe PnP and RED in more detail, and then turn to describe another, perhaps better founded, bridge between denoisers and the energy function ρ(x) via the score function.
This would serve our +next step towards diffusion models, as they unravel in Section 8 and beyond. +7.1. Plug-and-Play Prior (PnP). PnP [295] suggests the following steps in handling the +minimization of the problem posed in Equation (7.3): We start by splitting the variable x by +defining z = x and expressing each of the two penalties with a different variable: +ˆxMAP = arg min +x,z +� +∥Hx − y∥2 +2 + c · ρ(z) +� +s. t. +z = x. +(7.4) +The next step forms the Augmented Lagrangian of the above problem, converting the con- +straint into a penalty, +L(x, z, u) = ∥Hx − y∥2 +2 + c · ρ(z) + λ∥z − x + u∥2 +2 − λ∥u∥2 +2, +(7.5) +where u is the scaled dual variable and λ is an (arbitrary) penalty weight (see more in [295]). +The third and final step applies ADMM [27] for the minimization of L(x, z, u) with respect to +x and z while updating u. These are obtained by alternating between the treatment of each +variable while fixing the others: +x ← arg min +x +� +∥Hx − y∥2 +2 + λ∥z − x + u∥2 +2 +� += +� +HT H + λI +�−1 � +HT y + z − u +� +, +(7.6) +z ← arg min +z +� +c · ρ(z) + λ∥z − x + u∥2 +2 +� +, +(7.7) +u ← u + (x − z). +(7.8) +In the above, the first update equation amounts to a simple Least-Squares, which does not +involve ρ(x). The true drama takes place in the second update formula – observe its close +resemblance to Equation (3.3), which formulates an image denoising task. Indeed, instead of +choosing/guessing/learning ρ(x), we can apply our favorite denoiser ˆz = D(x−u, σ0) where σ0 +should be inversely proportional to λ/c. This way, PnP offers an appealing iterative algorithm +that repeatedly applies a denoiser in order to handle any underlying inverse problem, just as +promised. +While the original PnP paper did not dive into the issue of convergence of the above ADMM +algorithm, nor posed conditions on the denoiser to support such guarantees, later work offers +such a theoretical discussion – we refer the interested readers to [42, 309, 269, 155]. +7We should note that an alternative, yet closely related, derivation is offered in [196] from an approximate +message passing point of view. + +24 +M. ELAD, B. KAWAR AND G. VAKSMAN +7.2. Regularization by Denoising (RED). An alternative angle towards the relationship +between ρ(x) and image denoising is presented in [231]. The core idea is quite simple, using +the following explicit formula for ρ(x) that relies on a denoiser: +ρ(x) = xT [x − D(x, σ0)] . +(7.9) +The intuition behind this expression can be uncovered by considering a linearized form of +the denoising process, D(x, σ0) = S(x)x, where S(x) is an image-dependent matrix that +represents the smoothing applied by the noise removal process. This way, the chosen energy +function becomes ρ(x) = xT [I − S(x)] x, which is a Laplacian smoothness prior of the kind +described in Section 3, although being image-adaptive (and thus far more effective). +The work in [231] shows that if the denoiser D(x, σ0) is differentiable, passive and of symmetric +Jacobian, the chosen energy function in Equation (7.9) is guaranteed to be convex. If, in +addition, the denoiser satisfies a local homogeneity property8, then the following relationship +holds: +∇xρ(x) = 2 [x − D(x, σ0)] . +(7.10) +This relationship is a centerpiece in the construction of several RED algorithms. Plugging +the chosen ρ(x) from Equation (7.9) into Equation (7.3) implies that the gradient of this +functional is easily accessible, requiring a single activation of the chosen denoiser. 
Critically, +this gradient does not require the differentiation of D(x, σ0), which would have required far +more computational power and memory consumption. As a consequence, various gradient- +based optimization strategies can be applied for computing ˆxMAP , and all are guaranteed to +converge to the global minimizer of the MAP penalty. Again, we arrive at iterative algorithms +that apply simple linear operations and a denoiser in each step, aiming to solve general linear +inverse problems. +An intriguing question with respect to the above is the identity of the denoiser to use within +RED. Should it be an MMSE denoiser? Should it be designed to remove AWGN? Would these +choices lead to the required properties mentioned above (diffentiability, symmetry, passivity, +homogeneity)? What should σ0 be? Partial answers to these questions are given by the next +discussion on the score function. +7.3. The Score Function and its Relevance to Inverse Problems. Embarking from Equa- +tions (7.2) and (7.3), we now present a very different approach towards getting to the same +RED formulation, regularizing inverse problems via a denoiser. Assume that our goal is to +find ˆxMAP by Steepest Descent (SD), and thus our iterative formula should be +ˆxk+1 = ˆxk − µ +� +HT (Hˆxk − y) − c · ∇x log p(x)|ˆxk +� +. +(7.11) +The term ∇x log p(x) is known in the statistical literature as the score function, being a flow- +field that describes the optimal ascent direction over the log of the prior. An old mathematical +8See [231] for the exact definitions of these ingredients and for the proof of their implications. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +25 +result, commonly attributed to Miyasawa [200], Stein [265], or Tweedie [84], and re-exposed +in [138], proves that +(7.12) +∇y log p(y) = D(y, σ0) − y +σ2 +0 +, +where y = x + v is a noisy version of x with v ∼ N(0, σ2 +0I), and D(y, σ0) should be the +optimal Minimum Mean Squared Error (MMSE) denoiser, E(x|y). A proof of this result is +brought in Appendix D. +While it is impossible to obtain the MMSE denoiser (as p(x) is unknown), modern deep +learning-based denoisers perform very well (see Figure 4.1), and therefore constitute a good +approximation for it. And so, while Equation (7.11) expects to use the score function that +refers to p(x), a denoiser can provide an approximation of it that considers a slightly blurry +probability density function9 p(y) = p(x) ⊗ N(0, σ2 +0I). +When σ0 is small enough10, this +approximation becomes very effective and the resulting algorithm admits the following update +rule: +ˆxk+1 = ˆxk − µ +� +HT (Hˆxk − y) + c (xk − D(ˆxk, σ0)) +� +, +(7.13) +which is exactly the SD version of RED [231]. +7.4. Summary: Denoisers for Solving Inverse Problems. Figures 7.1 and 7.2 present +illustrative results of PnP [295], RED [231], and NCSR [74] for deblurring and single-image +super-resolution. Note that while NCSR is specifically tailored to handle these two applica- +tions, PnP and RED are unaware of the underlying task, and use a given denoiser. The tests +presented employ both a simple median filter and the TNRD denoiser [50]. Surprisingly, even +a plain denoiser as the median filter can provide some recovery effect. More details on these +experiments and more results can be found in [231]. +PnP and RED have drawn much interest in our community in the past several years. 
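To ground the discussion before surveying this follow-up activity, the NumPy sketch below (ours, illustrative) spells out both recipes side by side: the PnP-ADMM loop of Equations (7.6)-(7.8) and the steepest-descent RED iteration of Equation (7.13), together with the score estimate of Equation (7.12). The denoiser, σ0, λ, c and the step sizes are placeholders, and the PnP update is written in the standard scaled-ADMM sign convention, which matches Equations (7.6)-(7.8) up to the sign of the dual variable u.

import numpy as np

def score_estimate(y, denoiser, sigma0):
    # Tweedie/Miyasawa identity (Eq. (7.12)): score of the sigma0-blurred PDF
    return (denoiser(y, sigma0) - y) / sigma0 ** 2

def pnp_admm(y, H, denoiser, sigma0, lam=0.5, n_iters=50):
    # Plug-and-Play ADMM (Eqs. (7.6)-(7.8)); the least-squares step is solved directly.
    N = H.shape[1]
    x = H.T @ y                                # crude initialization
    z = x.copy()
    u = np.zeros(N)
    A = H.T @ H + lam * np.eye(N)              # system matrix of the data-fidelity step
    for _ in range(n_iters):
        x = np.linalg.solve(A, H.T @ y + lam * (z - u))  # data-fidelity (least-squares) step
        z = denoiser(x + u, sigma0)                       # prior step: the plugged-in denoiser
        u = u + x - z                                     # dual update
    return x

def red_sd(y, H, denoiser, sigma0, c=1.0, mu=0.1, n_iters=200):
    # Steepest-descent RED (Eq. (7.13)): the prior gradient is the denoising residual (Eq. (7.10)).
    x = H.T @ y
    for _ in range(n_iters):
        data_grad = H.T @ (H @ x - y)
        prior_grad = c * (x - denoiser(x, sigma0))
        x = x - mu * (data_grad + prior_grad)
    return x

Any Gaussian denoiser, from a simple median filter to a deep network, can be passed as the denoiser argument, which is precisely the appeal of both schemes.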
Followup +work has been considering a theoretical analysis of the two methods [42, 278, 225, 94, 309, 269], +deployment of the proposed algorithms in various applications [263, 28, 139, 49], creation of +new variants of these two methods [283, 279, 268, 280, 267, 123, 56], and more. An appealing +outlet of this work returns to the unfolding idea discussed in Section 5: PnP/RED can be +used to define well-motivated architectures for solving general inverse problems, by unfolding +the proposed algorithms, and then training the repeated denoiser to best serve a series of +inverse problems jointly. +This way, by plugging in the degradation operator H, a single +network can treat a variety of tasks in image processing, built around a core learned denoising +engine [194, 229, 73, 192, 333]. +8. Discovery 2: Image Synthesis via Image Denoisers. The deep learning revolution has +enabled several capabilities that were previously thought to be practically impossible. Among +the most intriguing such capabilities is image synthesis – the ability to generate a variety +9See Appendix D for a justification of this claim. +10RED [231] suggests to use σ0 ≈ 3 − 5 for images with 256 × 256 gray-values. + +26 +M. ELAD, B. KAWAR AND G. VAKSMAN +(a) Ground Truth +(b) Input 20.83dB +(c) RED (Median) 25.87dB +(d) NCSR 28.39dB +(e) PnP (TNRD) 28.43dB +(f) RED (TNRD) 28.82dB +Figure 7.1: Visual comparison of deblurring results by PnP and RED. NCSR [50] is brought +as a reference to compare with. +of natural-looking images, without conditioning on any kind of input or initialization. More +formally, the goal of image synthesis is to obtain a random generator whose outputs follow +the prior distribution of images x ∼ p(x). Succeeding in this task would testify that we have +seized the true distribution of images, and this may aid in solving a variety of imaging tasks. +A common theme in the definition of such image generators is the need to design of a learned +machine GΘ(z), which admits a simply distributed input vector z (e.g., z ∼ N(0, I)) and +converts it to a valid sample from p(x). +GΘ(z) is a neural network parameterized by Θ, +and various techniques were conceived in the past decade for learning Θ for best fitting the +synthesized results with the destination PDF. In this context, the main tool of interest, which +popularized image synthesis, is called GAN – Generative Adversarial Network [107]. While +alternatives to GANs do exist, such as Variational Auto-Encoders (VAE) [151], Normalizing +Flow (NF) techniques [228, 150], Autoregressive models [293], and energy-based methods [118, +79], GANs were typically at the lead in image generation. Since their introduction and until +recently, GANs have undergone various improvements [222, 11, 112, 327], and achieved stellar + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +27 +(a) Ground Truth +(b) Bicubic 20.68dB +(c) RED (Median) 24.44dB +(d) NCSR 26.79dB +(e) PnP (TNRD) 26.61dB +(f) RED (TNRD) 27.39dB +Figure 7.2: Visual comparison of super-resolution (3:1) results by PnP and RED. NCSR [50] +is brought as a reference to compare with. +performance [29, 141, 249]. However, this changed dramatically with the arrival of diffusion +models [257, 260, 120]. +GANs, and the other generative models mentioned above, are detached from the topic of +image denoising. In contrast, diffusion models heavily rely on the score function and thus +on image denoisers for addressing the task of image synthesis. 
This recent line of work that +started to gain traction, aptly named score-based generative models [260, 261] or denoising +diffusion probabilistic models [257, 120], utilizes deep learning-based denoisers to approximate +the score function, which is then used in an iterative algorithm to obtain images x that are +fair samples from the PDF p(x). +The iterative algorithms used for generation in this context are largely based on Langevin dy- +namics [230, 19], a Markov Chain Monte Carlo (MCMC) method with the following transition +rule: +(8.1) +xt+1 = xt + α∇xt log p(xt) + +√ +2αzt, +where zt ∼ N(0, I), and α is an appropriate small constant. Initialized randomly, after a +sufficiently large number of iterations, and under some mild conditions on p(x), this process +converges to a sampling from the distribution p(x) whose score function is used [230]. Intu- +itively, the algorithm follows the direction of the gradient of the log-probability, climbing from +one image to a more probable one. This is a gradient ascent process, and the noise is added + +28 +M. ELAD, B. KAWAR AND G. VAKSMAN +in each iteration to provide stochasticity, which effectively leads to sampling from p(x) rather +than converging to a local maximum. +While it is tempting to use the true data distribution’s score function in Langevin dynamics, a +few problems prevent such a use [260]. One of the main issues lies with the well-known cardinal +manifold assumption [239], which relies on the observation that natural images reside on a +low-dimensional manifold in their embedding space. Therefore, for a random initialization of +x0, it holds with probability 1 that p(x0) = 0, rendering the score function undefined at best, +and without an ability to drift towards the image manifold in subsequent iterations. A possible +solution is to approximate p(x) by its slightly blurred counterpart p(y), where y = x + v, +v ∼ N(0, σ2I), with a very small σ [296]. This resolves the aforementioned problem, as the +Gaussian noise distribution has infinite tails. However, in practice, such a Langevin sampling +algorithm requires many thousands of iterations to converge [155], hindering its practical +applicability. +The authors of [260] suggest the Annealed Langevin Dynamics (ALD) algorithm11, which +considers a sequence of Gaussian noisy image distributions p0(y), p1(y), . . . , pL−1(y), pL(y) +with standard deviations σ0 > σ1 > · · · > σL−1 > σL. Applying a few iterations of Langevin +dynamics for each of the distributions, starting with a very large σ0 and ending with a very +small σL, enables a faster convergence. Each of these steps is applied using a denoiser that +estimates the score function, and the output of each such process is used to initialize the +next. This implies that the synthesis creates a chain of noisy images with diminishing levels +of noise, starting with pure canonical Gaussian noise and gradually carving out an image +content out of it. Intuitively, this translates to drawing from a wide distribution and then +gradually narrowing it, leading to faster sampling and better performance in image generation. +Algorithm 8.1 presents this image sampler: The outer loop sweeps through the L + 1 values +of σ, while the inner loop applies T Langevin steps for each. The score function ∇x log pi(x), +which stands for the σi-blurred PDF of x, is approximated by +∇x log pi(x) = D(x, σi) − x +σ2 +i +. +(8.2) +Observe that the step size α is modified throughout this process, chosen to be proportional +to σ2 +i . 
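The whole sampler (listed as Algorithm 8.1 below) is short enough to sketch directly; the following NumPy version is our own illustrative rendering, with the pretrained denoiser, the noise schedule {σi}, and the constants ϵ and T left as user-supplied placeholders.

import numpy as np

def annealed_langevin(denoiser, sigmas, shape, eps=2e-5, T=100, seed=0):
    # Annealed Langevin Dynamics: sample an image from p(x) using only a denoiser
    # D(x, sigma), which stands in for the score function via Eq. (8.2).
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(shape)                     # start from pure Gaussian noise
    for sigma in sigmas:                               # sweep sigma_0 > sigma_1 > ... > sigma_L
        alpha = eps * sigma ** 2 / sigmas[-1] ** 2     # step size proportional to sigma_i^2
        for _ in range(T):
            score = (denoiser(x, sigma) - x) / sigma ** 2
            z = rng.standard_normal(shape)
            x = x + alpha * score + np.sqrt(2 * alpha) * z   # Langevin step, Eq. (8.1)
    return x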
This aligns with the fact that larger σ values imply a more regular and smooth PDF, +which is easier to sample from.12 Figure 8.1 presents several examples of temporal steps in the +ALD process that starts with pure Gaussian noise and ends with a high-quality synthesized +image. +11A very similar algorithm has been proposed in parallel by [120]. Preceding these two works is the one +reported in [257] who proposed a similar process while relying on a different rationale borrowed from statistical +physics. +12A different explanation for this choice of the step size is given in [260], motivated by a desire to better +balance the norms of the score versus the additive noise in the Langevin update formula. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +29 +Algorithm 8.1 the Annealed Langevin Dynamics (ALD) algorithm +Input: {σi}L +i=0, ϵ, T +Initialize x0 ∼ N(0, I) +for i ← 0 to L do +αi ← ϵ · σ2 +i /σ2 +L +for t ← 1 to T do +Draw zt ∼ N (0, I) +xt ← xt−1 + αi [D(xt−1, σi) − xt−1] /σ2 +i + √2αizt +end +x0 ← xT +end +Output: x0 +The ALD algorithm sparked a wave of related works [261, 120, 262, 208, 287, 68, 122, 143, 121] +that continually improved the performance of these generative diffusion models, eventually +surpassing that of GANs [68]. +We show some of their results in Figure 8.2. +Neverthe- +less, these iterative algorithms are still considerably slower than GANs, so substantial work +has been invested in improving their speed without compromising significantly on gener- +ation quality [258, 135, 247], often achieving impressive speedup levels. +Diffusion models +have since become ubiquitous in many applications [142, 209, 21, 116, 6, 253, 254, 144], +prompting researchers to prepare surveys of their impact on the image processing field and +beyond [315, 60, 36]. +Figure 8.1: Temporal steps along 3 independent synthesis paths of the Annealed Langevin +Dynamics [260] algorithm, using a denoiser [261] trained on LSUN bedroom [319] images. +9. Discovery 3: High Perceptual Quality Image Recovery. We are now stepping into the +last and what we believe to be one of the most exciting topics in the story of image denoisers +– solving general linear inverse problems while striving for perfect perceptual quality, and +achieving this with the support of an MMSE denoiser. We start with the simplest inverse +problem – image denoising itself – and grow from there to more general recovery tasks. + +30 +M. ELAD, B. KAWAR AND G. VAKSMAN +Figure 8.2: Image generation results for CelebA-HQ [172] (left) and ImageNet [66] (right) +using score-based denoising diffusion generative models [262, 68]. +9.1. Revisiting the Image Denoising Problem. We return to the classic image denoising +problem, where y = x+v in a given noisy image, x ∼ p(x) is it’s ideal origin, and v ∼ N(0, σ2 +y) +is the AWGN. Our goal is to recover x, but now we change the rules of the game by expecting +high perceptual quality results. How could this be achieved? +Throughout the classical era of denoising, and well into the modern AI days, denoisers were +mostly evaluated using the Mean Squared Error (MSE) measure shown in Equation (2.2) (or +tightly related measures such as the Peak Signal-to-Noise Ratio – PSNR). As can be seen in +Figure 4.1, MSE has been and still is a commonly used performance measure for denoisers. 
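For completeness, the PSNR mentioned above is nothing but a logarithmic rescaling of the MSE; the short helper below (ours) makes the relation explicit for images with a peak value of 255.

import numpy as np

def psnr(x, x_hat, peak=255.0):
    # Peak Signal-to-Noise Ratio, a logarithmic rescaling of the MSE of Eq. (2.2)
    mse = np.mean((np.asarray(x, float) - np.asarray(x_hat, float)) ** 2)
    return 10.0 * np.log10(peak ** 2 / mse)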
The +MSE metric has several clear benefits: it is zero when the denoiser perfectly recovers the image, +it is intuitive to understand, and it produces mathematically elegant results for theoretical +analysis, as well as practical considerations such as ease of differentiation for optimization. +However, the MSE distortion measure suffers from a critical shortcoming: As discussed in +Section 2.1 and in Appendix A, the best possible result in MSE (MMSE), regardless of the +denoising method used to approximate it, would rely on a conditional expectation, +ˆxMMSE = arg min +ˆx +E +� +∥x − ˆx∥2 +2 +� += +� +x +xp(x|y)dx = E (x|y) . +(9.1) +In other words, when optimizing for MSE, our main goal is to get as close as possible to +the original image in expectation, and this implies an averaging over all possible solutions, +weighted by their posterior probability. Thus, depending on the geometry of the image man- +ifold and the severity of the noise, the MMSE solution may tend to be too blurry and of +relatively low probability p(ˆxMMSE), falling outside of the desired manifold. We illustrate +this phenomenon in a 2-dimensional example in Figure 9.1. +Indeed, the fact that MMSE denoising achieves optimal L2 distortion necessarily implies that +perceptual quality is compromised. The authors of [22] prove the existence of a “perception- +distortion tradeoff”: distortion (of any kind!) and perceptual quality are at odds with each + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +31 +Figure 9.1: A 2-dimensional qualitative demonstration of the disadvantages of MMSE denois- +ing. Given a noisy image, the MMSE denoiser falls outside of the image manifold, whereas +a posterior sampler would necessarily sample points that reside on it. This leads to better +perceptual quality in the denoising results. +other, and optimizing one necessarily deteriorates the other. +In this context, perceptual +quality is defined as the proximity between the original image distribution p(x), and the +denoised image one p(ˆx). Figure 9.2 presents the essence of these findings in [22]. +Figure 9.2: The perception-distortion trade-off [22]: Any recovery algorithm necessarily per- +forms on the blue-curve or above it. On the perception-distortion bound curve, the top-left +point refers to the MMSE estimation, while the right-bottom one (or right to it – see [22]) is +obtained by a posterior sampler. A gap of 3dB divides between the two when using the MSE +distortion measure. +With this tension between visual quality and distortion in mind, alternative approaches to +MSE were developed over the years, aiming for high perceptual quality denoising [69, 67, 212, +146]. One such technique is to sample from the posterior distribution: given a noisy image + +Legend: +Image manifold +MMSE result E(xly) +Noisy image +Probable samples from +the posterior distributionPerception +MMSE +3dB +Possible +Better visual quality +●Alg. 1 +Region +Alg. 2 +Alg. 3 +Alg. 4 +Impossible +Posterior +Region +Sampler +Distortion +Less Distortion32 +M. ELAD, B. KAWAR AND G. VAKSMAN +y, we aim to develop a denoiser that outputs ˆx ∼ p(x|y), i.e., samples from the posterior +distribution of pristine images given the noisy measurement. A successful posterior sampler +would achieve perfect perceptual quality, as when marginalizing over y, we get p(ˆx) = p(x). 
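A toy one-dimensional computation (ours, purely illustrative) makes the geometric point of Figure 9.1 tangible: for a two-mode prior, the MMSE estimate is a weighted average that falls between the modes, whereas posterior samples always land on one of them.

import numpy as np

rng = np.random.default_rng(0)
modes = np.array([-1.0, 1.0])        # a two-point "image manifold" with equal prior weights
sigma = 1.0                          # noise level
y = 0.1                              # a noisy measurement near the decision boundary

# posterior weights p(mode | y) for x drawn uniformly from the two modes
logw = -(y - modes) ** 2 / (2 * sigma ** 2)
w = np.exp(logw - logw.max()); w /= w.sum()

x_mmse = (w * modes).sum()                       # MMSE estimate: a weighted average, about 0.1
x_samples = rng.choice(modes, size=5, p=w)       # posterior samples: always -1 or +1
print(x_mmse, x_samples)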
+It is important to notice that this technique involves a subtle paradigm shift – the denoiser +is no longer a deterministic function of the noisy input y, but rather a stochastic one and +this implies a multitude of possible solutions. In the following, we present two pragmatic +approaches for approximating posterior sampling behavior. +To traverse the perception-distortion tradeoff, a Waserstein Generative Adversarial Network +(WGAN) conditioned on noisy images can be used [22, 69]. Such a network consists of two +main elements: a generator, which takes a noisy image as well as a random vector as input, and +outputs a denoised image, and a discriminator, whose job is to distinguish between denoised +and original images. The discriminator is trained to discriminate between the generator’s +outputs and original images, while the generator optimizes two loss functions: the MSE with +respect to the original image, and the ability to “fool” the discriminator, thus encouraging its +output to “look like a real image” in the eyes of the discriminator. These two losses, as proven +in [22], are at odds with one another, and tuning their respective weights in the total loss +function translates to the traversal of the perception-distortion tradeoff. This idea is further +improved upon by [212]: instead of requiring low distortion on individual generator samples, +the requirement is made on their mean. This results in a loss function that encourages the +generator to act as a sampler from the posterior distribution, therefore attaining near-perfect +perceptual quality while remaining faithful to the input image. +An alternative posterior sampling approach, which reconnects with MMSE denoisers, is using +the annealed Langevin dynamics algorithm [260] presented in the previous section. Recall that +ALD uses the score function ∇˜x log pi(˜x) to sample from a prior distribution pi(˜x)13. In [146], +the regular ALD algorithm is extended to treat image denoising by analytically conditioning +the score function on a noisy input y – effectively sampling from the posterior distribution +pi(˜x|y). The algorithm is initialized with the noisy input y, which is then gradually denoised +using the conditional score function, obtained using the Bayes rule, +(9.2) +∇˜x log pi(˜x|y) = ∇˜x log p(y|˜x)pi(˜x) +p(y) += ∇˜x log pi(˜x) + ∇˜x log p(y|xt). +The term ∇˜x log pi(˜x) is the regular score function which can be approximated by an MSE- +trained denoiser. As for the other term, ∇˜x log p(y|˜x), observe that this likelihood can be +rewritten by exploiting two facts: (i) y = x + v is the noisy image (v ∼ N(0, σ2 +yI), and (ii) +˜x = x + z is the annealed solution (z ∼ N(0, σ2 +i I)), and thus +p(y|˜x) = p(y − ˜x|˜x) +(9.3) += p(x + v − x − z|˜x) += p(v − z|˜x). +13In these notations, pi stands for a σi-blurred PDF version of the original prior p(x), and ˜x is a temporary +synthesized image that contains annealing Gaussian noise with variance σ2 +i . + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +33 +If we assume statistical independence between the measurements’ noise and the annealing +one, v − z becomes a plain Gaussian vector. However, its conditioning on the knowledge of ˜x +leads to a dead-end, since this image contains z in it. The alternative, as developed in [146], is +to construct the annealing noise such that v − z is statistically independent of both z and ˜x. 
+This can be obtained by breaking the measurements’ noise v into small fragments, and assume +that their partial accumulations constitute the annealing noise in each of the stages. Thus, +v − z is a white Gaussian noise that has no correlation with the noise z, nor with the target +image x. Put in other words, this likelihood expression becomes simple when considering y +to be an even more noisy version of ˜x. This in turn makes p(y|˜x) a simple white Gaussian +distribution of the form N(0, (σ2 +y − σ2 +i )I). Therefore, +(9.4) +∇˜x log pi(˜x|y) = ∇˜x log pi(˜x) + y − ˜x +σ2y − σ2 +i +. +Plugging this modification into ALD turns Algorithm 8.1 into an image denoiser. Beyond +its ability to attain near-perfect perceptual quality, this approach has the advantage of not +requiring any special model training. Crucially, this finding shows that simple MSE denoiser +training is more powerful than originally thought – not only can it approximate MMSE de- +noiser behavior, but it can also perform denoising by posterior sampling under the Langevin +dynamics scheme. +Figure 9.3 presents a denoising result by the above-described method. +Several observations are in order from this figure: +• The generated results are indeed of very high perceptual quality; +• Running ALD several times results with different solutions, all valid and yet diverse – +see the STD image that exposes the uncertainty within the task being solved; +• Denoising y directly by D(y, σy) leads to better MMSE but poorer perceptual quality; +• The figure also shows the evolving solution within the ALD steps, and as can be seen, +the noise in y is effectively peeled layer by layer. +9.2. High Perceptual Quality Solution to Inverse Problems. We now expand our discus- +sion by returning to general linear inverse problems of the form y = Hx+v, where H ∈ RM×N +is a known matrix, v ∈ RM is AWGN, and y ∈ RM is the given measurement vector. Our +goal is to propose novel solutions to these problems while striving for high perceptual quality. +The above discussion on the perception-distortion tradeoff is not limited to image denoising, +but also applies to more general inverse problems [22]. There too, potential solvers need to +tradeoff distortion metrics (e.g. MSE) versus perception measures (e.g. the distribution shift +between real images and the obtained solutions). Indeed, MSE in these cases may become +far more challenging as an optimization goal due to the ill-posedness of the inverse problems. +Consider, as an example, an inpainting problem in which the bottom half of the image is given +and the goal is to recover the top part. The MMSE solution in this case necessarily averages +all possible completions, resulting in a very blurry outcome. More broadly, optimizing for +MSE in this context would result in a clear regression-to-the-mean, which is significantly +more pronounced in under-determined inverse problems than in image denoising. + +34 +M. ELAD, B. KAWAR AND G. VAKSMAN +Original +MMSE +Noisy +STD +Samples from the Posterior Distribution +Intermediate Denoising Steps +Figure 9.3: Image denoising using the modified version of Annealed Langevin Dynamics [146]. +Top row (left to right): An original image, its noisy version (σy = 100), the MMSE-optimized +denoiser’s result, and the STD of the sampled solutions. Middle row: 6 sampled ALD denoising +solutions. Bottom row: 6 intermediate steps within the ALD algorithm. 
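To summarize Section 9.1 in code before moving on, the following NumPy sketch (ours, simplified) augments the annealed Langevin sampler of Algorithm 8.1 with the conditional score of Equation (9.4). It assumes an annealing schedule with σy > σ0 > · · · > σL, and its step-size constants are placeholders rather than the exact settings of [146].

import numpy as np

def ald_posterior_denoiser(y, denoiser, sigma_y, sigmas, eps=2e-5, T=30, seed=0):
    # Posterior-sampling denoiser: annealed Langevin dynamics driven by the
    # conditional score of Eq. (9.4).  Requires sigma_y**2 - sigma**2 > 0 for all levels.
    rng = np.random.default_rng(seed)
    x = y.copy()                                   # initialize with the noisy image
    for sigma in sigmas:
        alpha = eps * sigma ** 2 / sigmas[-1] ** 2
        for _ in range(T):
            prior_score = (denoiser(x, sigma) - x) / sigma ** 2
            likelihood_score = (y - x) / (sigma_y ** 2 - sigma ** 2)   # Eq. (9.4)
            z = rng.standard_normal(x.shape)
            x = x + alpha * (prior_score + likelihood_score) + np.sqrt(2 * alpha) * z
    return x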
+Successful inverse problem solvers, such as the Plug-and-Play Prior [295] and RED [231] +algorithms mentioned in Section 7, aim for a Maximum-a-Posteriori (MAP) solution to the +inverse problem at hand, rather than MMSE. While these methods achieve impressive results, +the MAP solution can be improved upon in terms of perceptual quality without compromising +on distortion performance [22]. This is due to the deterministic nature of MAP solvers – a +solver that aims for best perceptual quality should necessarily be stochastic in order to account +for the multiple possible solutions to the given problem [211]. +Similar to the image denoising case, stochastically sampling from the posterior distribution +achieves perfect perceptual quality in general inverse problems. Following the road paved +in the previous section, an appealing way to approximate such sampling would be to follow +Equation (9.2), using a generative diffusion model and augmenting the score by an analytical +term that conditions on the observed measurement y. This idea has been initially suggested +by [262, 138] for handling noiseless linear inverse problems, and later extended to the more +general case in [145, 142, 54, 53, 195]. Below we describe the essence of the proposed approach +in SNIPS [145]. Visual examples of this method in action are brought in Figure 9.4 for several +inverse problems. +Our goal is to obtain a closed-form expression for the term ∇˜x log p(y|˜x) in Equation (9.2). We +use the following two relationships: (i) y = Hx+v is the noisy measurement (v ∼ N +� +0, σ2 +yI +� +), +and (ii) ˜x = x + z is the annealed solution (z ∼ N(0, σ2 +i I)). The likelihood function can be + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +35 +Original +Degraded +MMSE +Samples from the Posterior +Figure 9.4: Comparison of an MMSE result with samples from the posterior distribution +using SNIPS [145]. Note the subtle improvements in perceptual quality from MMSE to the +posterior samples, especially in the finer details such as the hair. The comparison is conducted +on 64×64 pixel images from CelebA [172], on the problems of compressive sensing, inpainting, +and 4× super-resolution (top-to-bottom). +simplified to +p(y|˜x) = p(y − H˜x|˜x) +(9.5) += p(Hx + v − Hx − Hz|˜x) += p(v − Hz|x + z). +As in the denoising case in Equation (9.3), statistical independence between v and z cannot +be assumed due to the dependency on ˜x. The alternative, as shown by SNIPS [145] relies +again on a delicate connection between these two random entities, obtained by a decoupling of +the measurements’ equation via an Singular Value Decomposition (SVD) of the degradation +matrix H = UΣVT : +p(y|˜x) = p(v − Hz|x + z) +(9.6) += p(UT v − ΣVT z|VT x + VT z) += p(ˆv − Σˆz|ˆx + ˆz) += +� +k +p(ˆvk − skˆzk|ˆxk + ˆzk). +The second row in the above equation is obtained by transforming the term v − Hz by the +matrix UT , and similarly transforming x + z via a multiplication with VT . As these are +unitary matrices, the transformations applied do not change the statistics. Considering the +transformed vectors UT y = ˆy, VT x = ˆx, VT z = ˆz and UT v = ˆv leads to the third row + +36 +M. ELAD, B. KAWAR AND G. VAKSMAN +in the above equation. This joint probability can be decoupled into a separable Gaussian +distribution if we choose each entry ˆvk − skˆzk to be independent of ˆzk, just as practiced in the +denoising case, and this time while taking into account the singular value sk. 
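The decoupling at the heart of this derivation is easy to verify numerically; the short NumPy sketch below (ours) transforms a synthetic measurement into the SVD domain of H and checks that each coordinate indeed obeys a scalar relation ŷk = sk·x̂k + v̂k, with the transformed noise remaining white Gaussian of the same standard deviation.

import numpy as np

rng = np.random.default_rng(1)
M, N, sigma_y = 60, 100, 0.1
H = rng.standard_normal((M, N))
x = rng.standard_normal(N)
v = sigma_y * rng.standard_normal(M)
y = H @ x + v

U, s, Vt = np.linalg.svd(H, full_matrices=False)   # H = U diag(s) Vt
y_hat = U.T @ y                                    # transformed measurements
x_hat = Vt @ x                                     # transformed signal
v_hat = y_hat - s * x_hat                          # recovered transformed noise

# each coordinate is a scalar problem y_hat_k = s_k * x_hat_k + v_hat_k, and the
# unitary transforms leave the noise white Gaussian with standard deviation sigma_y
assert np.allclose(v_hat, U.T @ v)
print(v_hat.std())                                  # roughly sigma_y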
This algorithm, +fully described in [145], demonstrates considerable success in a number of inverse problems +(see Figure 9.4), and already has several followup works [142, 54, 53, 195]. +We should mention that an alternative to all the above exists, in which one simply adds the +corrupted measurements y as an input to the denoising model itself, effectively condition- +ing the entire generative process on y [245, 243, 303]. This approach requires designing and +training a separate denoiser for each inverse problem, as the denoiser would need to implic- +itly learn the connection between the images and their corresponding measurements for the +specific problem at hand. Interestingly, this approach requires pairs of images, x and y, in +its training, but does not utilize knowledge of the degradation model itself (e.g., the matrix +H). This property allows this alternative approach to generalize beyond clearly formulated +inverse problems, and handle tasks such as stylization, JPEG-deblocking, and more. +"a DSLR photo of a +kangaroo walking in +New York City" +"an oil painting by +Matisse of a humanoid +robot playing chess" +"a stern-looking owl +dressed as a +librarian, digital art" +"3D render of a small +green balloon dog in +a light pink room" +"a photo of a wild +boar in a street, +wearing headphones" +Figure 9.5: Examples of synthesized images using DALL-E 2 [224], a text-to-image generative +denoising diffusion model. The input conditioning text is written below each image. +A particularly interesting case is when y is a textual description of the image contents. By +conditioning the denoiser model on such text, the generative diffusion process allows users to +perform text-to-image generation [224, 244, 232, 14]. This unprecedented capability became +instantly popular, as users were able to synthesize high-quality images by simply describing the +desired result in natural language, as we demonstrate in Figure 9.5. These models have become +a centerpiece in an ongoing and quickly advancing research area, as they have been adapted +for image editing [147, 202], object recontextualization [241, 95], 3D object generation [220], +and more [119, 129, 213, 346]. +10. Conclusion. Removal of white additive Gaussian noise from an image is a fascinating +topic, both because it poses a very interesting engineering challenge, and even more so, because +it creates new opportunities in image processing and machine learning. +In this paper we +highlight these two branches of activities. The first half of the paper concentrates on the +design of such denoisers, with a particular interest on the impact of the AI revolution on +this field. The second half of the paper features the usefulness of such image denoisers for +handling other tasks, such as image synthesis and solving inverse problems while targeting + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +37 +high-perceptual quality solutions. Figure 10.1 encapsulates this part of the story in a block +diagram. +Much remains to be done in this domain, in better understanding how to design appropriate +MMSE denoisers, and in harnessing them to other tasks beyond the ones described in this +paper, such as compression, segmentation, and more. More broadly, there are so many op- +portunities and challenges in better understanding, designing, and proposing creative usage +of image denoisers. +Figure 10.1: A summary of the main message of this paper: an MMSE denoiser is key in +synthesizing images and solving inverse problems. 
Interestingly, there is a great unexplored +proximity between PnP and RED algorithms [295, 231] and the more recent, diffusion-based, +techniques for getting high perceptual quality solutions for inverse problems [145, 53, 195]. + +Suppose that we are given an MMSE denoiser D(y) +WE CAN USE D(y)FOR .. +Solving ANY +Synthesizing +Denoising images +Solving ANY +natural- +while targeting +inverse +inverse problem +looking +problem +high perceptual +with high +(PnP/RED) +images +quality +perceptual quality +All the above are achieved by +simply applying D(y) iteratively38 +M. ELAD, B. KAWAR AND G. VAKSMAN +Appendix A. Derivation of the MMSE Estimation. +Consider an ideal image x drawn from the probability density function p(x), and assume that +we are given a measurement of it, y, related to it via the conditional probability p(y|x). Our +goal is to find the estimator ˆx = f(y) that minimizes the expected mean-squared-error, +MSE = E +� +∥x − ˆx∥2 +2 | y +� += E +� +∥x − f(y)∥2 +2 | y +� += +� +∥x − f(y)∥2 +2p(x|y)dx. +(A.1) +Observe that this expectation is taken with respect to the unknown image x, while considering +y as known. In order to minimize the above measure, we take a derivative of this expression +with respect to f(y) and null it, +d +df(y) +� +∥x − f(y)∥2 +2 p(x|y)dx = −2 +� +(x − f(y)) p(x|y)dx = 0. +(A.2) +This results in +� +xp(x|y)dx = +� +f(y)p(x|y)dx = f(y) +� +p(x|y)dx = f(y). +(A.3) +The last step on the right-hand-side relies on the fact that +� +p(x|y)dx = 1. Thus, we get the +familiar closed-form solution for the MMSE estimation [217], +fMMSE(y) = +� +x +xp(x|y)dx = E (x|y) . +(A.4) +As a final step, as the posterior is not directly accessible, we may use the Bayes rule [137] and +write +fMMSE(y) = +� +x +xp(y|x)p(x) +p(y) +dx = +� +x +x +p(y|x)p(x) +� +x p(y|x)p(x)dxdx, +(A.5) +where this formula uses the ingredients we started with – p(y|x) and p(x). +Appendix B. A Closer Look at the Evolution of Priors. +Using the Gibbs distribution form, p(x) = c · exp{−ρ(x)}, we shift our focus from the prob- +ability density function p(x) to it’s corresponding energy function ρ(x). Table 3.1 brings a +list of possible analytical expressions for ρ(x) as evolved in the image processing literature. +Below we describe each of these options briefly, adopting the context of solving a general linear +inverse problem of the form y = Hx + v with the assumptions that v ∼ N(0, σ2I) and H is a +full-rank known matrix of size m × N (m < N). The MAP estimation in this case is given by +ˆxMAP = arg min +x +�∥Hx − y∥2 +2 +2σ2 +− log (p(x)) +� +(B.1) += arg min +x +� +∥Hx − y∥2 +2 + c · ρ(x) +� +. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +39 +Notice that 2σ2 was absorbed into the constant: c = 2σ2. Armed with this expression, let’s +consider each of the choices in Table 3.1 and explore its implications. Before diving into these +options, observe that without the regularization provided by ρ(x), the above optimization +becomes an ill-posed Least-Squares problem with infinitely many possible solutions. Thus, +the added prior serves as an important regularization, pushing towards a single (and hopefully, +meaningful) solution. +• Energy regularization: If HT H cannot be inverted, the most obvious algebraic +remedy would be to add a constant to its diagonal, resulting with the regularized +solution ˆxMAP = (HT H + cI)−1HT y. 
+This is exactly the solution offered by the +choice ρ(x) = ∥x∥2 +2, and when the constant c is taken to 0, this leads to the familiar +pseudo-inverse solution ˆxMAP = H†y. While mathematically appealing, this option +does not yield satisfactory visual results [251]. +• Spatial Smoothness: It is well-known that adjacent pixels in natural images are +more likely to be of smoothly varying values. Thus, penalizing a deviation from such +a smoothness property seems well-justified [15, 154]. +Plugging the option ρ(x) = +∥Lx∥2 +2 into the MAP expression leads to the closed-form solution ˆxMAP = (HT H + +cLT L)−1HT y, which is very-closely related to the well-known Wiener filter [304]. +• Optimally Learned Transform: Given a large enough dataset of images, we could +fit a multivariate Gaussian N(0, R) to them by adjusting the second moment. The +assumed zero mean is easily obtained by subtracting the mean image from the given +data. PCA [215] or Karhunen-Lo´eve Transform (KLT) [174, 140, 37, 136] offer a clear +computational path towards this moment matrix R as the auto-correlation matrix of +the available data. When the expression ρ(x) = xT R−1x is plugged into the MAP esti- +mation, we come back to the Wiener filter, this time as ˆxMAP = (HT H+cR−1)−1HT y. +Note that the same treatment could emerge from this formulation – ρ(x) = ∥Tx∥2 +2 = +xT R−1x, where T is the corresponding transform that should be applied on x, and +clearly TT T = R−1. +• Weighted Smoothness: All the above options suffer from the same difficulty – +they produce overly smoothed results. +In retrospect, the reason is obvious: non- +smooth behavior is heavily penalized and thus not encouraged, which results with +smeared edges. A way to overcome this difficulty is to produce a weight map that +describes the local smoothness tendency – regions in which smoothness is believed to +be correct should be assigned with a high weight, while low weight should be given +to regions suspected to be textured or edges [246, 52]. By constructing a diagonal +matrix W that contains the above weights as the main diagonal, and using the choice +& ρ(x) = ∥Lx∥2 +W, the MAP estimation becomes ˆxMAP = (HT H + cLT WL)−1HT y. +This is a spatially adaptive solution, dependent on the local weights. One may consider +an iterative approach where the temporary solution ˆxMAP is leveraged to update the +weights and then ˆxMAP is re-computed. This interesting option leads to the robust +statistics alternative discussed next [20]. +Before proceeding with the other prior options, we would like to draw the readers’ attention to + +40 +M. ELAD, B. KAWAR AND G. VAKSMAN +the fact that all the above choices correspond to the core assumption that the probability den- +sity function p(x) is a multivariate Gaussian. The obtained visual results of these techniques +expose the fact that this Gaussianity assumption is not adequate, and indeed, later research +in image processing turned to non-Gaussian and heavy-tailed alternative distributions, which +we discuss next. +• Robust statistics: Here is a simple experiment – take any natural image, apply +a Laplacian on it, and gather a histogram of the resulting values. This histogram +is likely to look as a heavy-tailed probability density function of a form similar to +c · exp(−|x|α) with α ≪ 2. This is exactly the deviation from Gaussianity referred +to above. 
+Thus, the robust statistics alternative [126, 97, 96, 44, 238] suggests a +replacement of the L2-norm of Lx by L1 or, more broadly, by functions of the form +1T µ{Lx} (e.g. µ(x) = |x|α). Notice that from here on, closed-form MAP solution +cannot be obtained, and iterative minimization strategies are necessary. +Adopting a different point of view, robust statistics considers pixel on edges and tex- +tures regions as outliers to the Gaussian distribution, and thus use robust estimation +techniques for their better handling. +• Total-Variation (TV): The same motivation as described above led to this brilliant +PDE formulation of spatial smoothness, ρ(x) = +� +v∈Ω |∇x(v)|dv, which accumulates +the length of the spatial gradients instead of their squares [240]. In its discretized +form, its treatment is very similar to the robust-statistics option. However, TV has +very different roots, providing a geometrically oriented edge-preserving measure of +smoothness – see various extensions of this line of work in [17, 99, 39, 3]. +• Other PDE-based options: While TV applies an L1-norm on the spatial gradients, +more general options can be envisioned, in which the accumulation is spatially adap- +tive, orientation sensitive, geometrically faithful, and more [216, 38, 255, 302, 111]. +Starting with the seminal anisotropic diffusion method by Perona and Malik [216], +various such methods of the form ρ(x) = +� +v∈Ω g +� +∇x(v), ∇2x(v) +� +dv were proposed +and perfected over the years, forming an exciting sub-field of mathematically oriented +image processing that relies on the vast knowledge in partial differential equations. +• Field-of-Experts (FoE): Let us return to the robust statistics option described above +and enrich it by considering a mixture of such distributions, ρ(x) = � +k λk1T µk{Lkx}. +This implies the need to define a series of functions µk and their corresponding weights +λk. FoE suggests to learn these elements from an image dataset, thus better fitting the +assumed prior to natural images. While earlier work on FoE [236] suggested a patch- +based maximum-likelihood learning approach, later efforts [50] brought a deep-learning +alternative tools to this fitting. +• Wavelet sparsity: The idea of relying on transform coefficients for constructing ρ(x) +has already been explored in the context of the KLT. The emergence of the Wavelet +transform in the late 80’s brought a new way of thinking about signals and images, +offering an elegant and more effective multi-scale representation that relies on non- +linear approximation [77, 76, 57, 98, 185, 177, 43, 221, 175, 325, 108]. Wavelets offer + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +41 +a concise description of the data with as few as possible coefficients, this way giving +birth to the central notion of sparsity. +This translates well to the proposed prior +ρ(x) = ∥Wx∥1 that promotes fewer non-zero dominant Wavelet coefficients. +As an interesting side note, if we are handling the image denoising problem – i.e. +H = I in Equation (B.1) – and the Wavelet transform matrix W is unitary, the +solution ˆxMAP has a closed-form solution, obtained via a soft-shrinkage [77, 76]. +• Self-similarity: So far we described two primary forces that promote simplicity in +image content – spatial smoothness and representation sparsity. 
Self-similarity is a +third such force that has been recognized as central by series of contributions, starting +with the seminal Non-Local-Means (NLM) algorithm [32], and heavily relied upon by +the famous BM3D [61] and other algorithms [181, 274, 187, 46, 297, 203, 273, 248, 167]. +Self-similarity stands for the assumption that any given (small-enough) patch in an +image is likely to find very similar ones in the image support, and thus treating these +together somehow is likely to lead to better recovery. More specifically, the expression +we bring here as an illustration, +ρ(x) = +� +k +� +j∈Ω(k) +d{Rkx, Rjx}, +(B.2) +sweeps through the image support, extract a patch in location k by the operator Rkx, +and finds all its corresponding matches j ∈ Ω(k). Forcing proximity between Rkx and +the patches Rjx induces a strong regularization over the unknown image x. +• Sparsity methods: While the notion of sparsity has already been exploited by +wavelets, later work took this idea and strengthened it by considering redundant and +learned representations. Under the assumption that ideal images can be described as +linear combinations of atoms from a pre-specified dictionary D, i.e., x = Dα, forcing +sparsity on the representation via the term ∥α∥0 provides an appealing and computa- +tionally feasible choice for ρ(x) [31, 88]. Vast work along these lines has been done, +considering global dictionaries and later local (patch-based) ones, leading to various +very successful recovery algorithms [89, 90, 4, 183, 182, 181, 81, 320, 71, 74, 100, 72, 85]. +• Low-Rank assumption: The last member to enter the Pantheon of image priors for +image processing relies on a low-rank assumption over groups of similar patches. This +idea is closely related to the self-similarity force described above, and in fact builds on +top of it. Given a set of closely related patches, instead of forcing proximity between +them, one may gather these as columns in a matrix and force a low-rank structure, +implying that all these patches are spanned by few main directions. Several very strong +recovery algorithms leveraged this idea in various forms, while exploiting theoretical +analysis that ties the low-rank requirement to the nuclear-norm [305, 35]. By summing +these norms over such groups, ρ(x) = � +k ∥XΩ(k)∥∗, a very potent regularization is +obtained [110, 310]. +As a summary, the above-described evolution of the priors has served as the skeleton of image +processing, forming the consistent progress of this field over the years. +This evolution is + +42 +M. ELAD, B. KAWAR AND G. VAKSMAN +Table B.1: Evolution of priors for images. +Years +Core concept +Formulae for ρ(·) +Representative +Reference +∼ 1970 +Energy regularization +∥x∥2 +2 +[251] +1975-1985 +Spatial smoothness +∥Lx∥2 +2 or ∥Dvx∥2 +2 + ∥Dhx∥2 +2 +[154] +1980-1985 +Optimally Learned Transform +∥Tx∥2 +2 = xT R−1x +[37] +where T/R is learned via PCA +1980-1990 +Weighted smoothness +∥Lx∥2 +W +[246] +1990-2000 +Robust statistics +1T µ{Lx} +[20] +e.g., Hubber-Markov +1992-2005 +Total-Variation +� +v∈Ω |∇x(v)|dv +[240] +or 1T � +|Dvx|2 + |Dhx|2 +1987-2005 +Other PDE-based options +� +v∈Ω g +� +∇x(v), ∇2x(v) +� +dv +[302] +2005-2009 +Field-of-Experts +� +k λk1T µk{Lkx} +[237] +1993-2005 +Wavelet sparsity +∥Wx∥1 +[76] +2000-2010 +Self-similarity +� +k +� +j∈Ω(k) d{Rkx, Rjx} +[32, 61] +2002-2012 +Sparsity methods +∥α∥0 s.t. x = Dα +[31] +2010-2017 +Low-Rank assumption +� +k ∥XΩ(k)∥∗ +[110] +characterized by four major and interconnected trends: +1. 
A migration from the familiar Gaussian distribution to the less intuitive heavy-tailed +ones; +2. A departure from L2 to sparsity-promoting norms, such as the L1; +3. A drift from linear approximation techniques (e.g. +PCA) to non-linear ones (e.g. +wavelets and sparse modeling); and above all, +4. A replacement of axiomatic expressions with learned ones. +Appendix C. Landmark Denoisers over the Years. +In Figure 4.1 we brought a graph showing the PSNR performance of landmark denoising algo- +rithms over the years. Below we provide more information on these techniques for completeness +of this study. For each of these we bring the full reference, describe the core algorithmic idea, +and provide the PSNR denoising performance on the BSD68 dataset (σ = 25). We should +note that in choosing the methods to include in this list we restricted the scope to ones that +report of BSD68 results. +• KSVD [89] [28.28dB]: Elad, M., & Aharon, M. (2006). Image denoising via sparse +and redundant representations over learned dictionaries. IEEE Transactions on Image +processing, 15(12), 3736-3745. +This method decomposes the noisy image into fully overlapping patches, and denoises +each by sparse approximation via OMP [214], while learning an over-complete dic- + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +43 +tionary. The denoised image is obtained by returning the cleaned patches to their +original locations while averaging them over the overlaps and with a weighted version +of the noisy image. +• BM3D [61] [28.57dB]: Dabov, K., Foi, A., Katkovnik, V., & Egiazarian, K. (2007). +Image denoising by sparse 3-D transform-domain collaborative filtering. IEEE Trans- +actions on Image Processing, 16(8), 2080-2095. +This algorithm extracts all fully overlapping patches from the noisy image and gathers +similar patches into 3D blocks. Denoising is performed by transforming these blocks, +forcing sparsity, and then transforming the sparse outcome back to the image domain. +The denoised image is obtained by returning the patches to their original locations +while averaging over the overlaps. This process is ran twice, where the first round +serves for an initial cleaning that improves the patch correspondences for the later +round. +• FoE [237] [27.77dB]: Roth, S., & Black, M. J. (2009). Fields of experts. Interna- +tional Journal of Computer Vision, 82(2), 205-229. +FoE (appeared originally in 2005 [236]) builds a generic prior that mixes several reg- +ularizers (called ”experts”). The prior’s parameters are learned via a contrastive di- +vergence penalty and MCMC sampling. The image denoising itself is obtained by an +iterative algorithm that computes the MAP estimation. +• LSSC [181] [28.70dB]: Mairal, J., Bach, F., Ponce, J., Sapiro, G., & Zisserman, A. +(2009). Non-local sparse models for image restoration. CVPR (pp. 2272-2279). +This algorithm combines the sparse representations (as in KSVD) and non-local sim- +ilarity (as in BM3D) concepts. It decomposes the noisy image into fully overlapping +patches and groups similar patches together. These groups of patches are denoised by +a joint sparse approximation that forces the same support over a learned dictionary. +The denoised image is obtained by returning the patches to their original locations +and averaging over the overlaps. +• EPLL [347] [28.71]: Zoran, D., & Weiss, Y. (2011). From learning models of natural +image patches to whole image restoration. ICCV (pp. 479-486). 
+EPLL models the distribution of image patches as a Gaussian Mixture Model (GMM), +and learns its parameters off-line with a dataset of clean images. +Denoising with +EPLL is a MAP estimation, posed as a minimization problem with a regularizer that +consists of a sum of patch log-likelihoods. This task is solved by applying quadratic +half-splitting and iterating over patch denoising and the whole image accumulation +steps. +• MPL [33] [28.96dB]: Burger, H. C., Schuler, C. J., & Harmeling, S. (2012, June). +Image denoising: Can plain neural networks compete with BM3D?. CVPR (pp. 2392- +2399). + +44 +M. ELAD, B. KAWAR AND G. VAKSMAN +This is the first effective deep-learning based method for image denoising. This method +extracts all fully overlapped patches as in classical algorithms, and filters each patch +by applying a multi-layer Perceptron (fully connected network). The reconstructed +image is obtained by returning the patches to their locations and averaging over the +overlapping regions. +• CSF [252] [28.74dB]: Schmidt, U., & Roth, S. (2014). Shrinkage fields for effective +image restoration. CVPR (pp. 2774-2781). +This algorithm poses a MAP estimation problem using a product of cascaded shrinkage +functions as a local prior. The parameters of these functions are learned from a dataset +as in FoE. The algorithm solves the obtained optimization by half-quadratic splitting +and iterating between local and global optimization steps. +• WNNM [110] [28.83dB]: Gu, S., Zhang, L., Zuo, W., & Feng, X. (2014). Weighted +nuclear norm minimization with application to image denoising. CVPR (pp. 2862- +2869). +This method decomposes an incoming image into fully overlapping patches and groups +similar patches arranging them as columns of a matrix. Denoising of the patches is +performed by forcing the rank of the constructed matrices to be small by minimizing +the matrix nuclear norm. The reconstructed image is obtained by returning the patches +to their original locations while averaging the overlaps. +• TNRD [50] [28.92dB]: Chen, Y., & Pock, T. (2016). +Trainable nonlinear reac- +tion diffusion: A flexible framework for fast and effective image restoration. IEEE +Transactions on Pattern Analysis and Machine Intelligence, 39(6), 1256-1272. +This method builds on the FoE method, by unfolding the minimization over its prior +and this way defining a parametric trainable network. Once the architecture is defined, +TNRD trains this neural network end-to-end in a supervised fashion using clean/noisy +pairs of images. Denoising is a simple inference of the resulting machine. +• DnCNN [330] [29.23dB]: Zhang, K., Zuo, W., Chen, Y., Meng, D., & Zhang, L. +(2017). Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. +IEEE Transactions on Image Processing, 26(7), 3142-3155. +This is the first deep learning method that outperforms classical algorithms by a con- +siderable gap. +It filters images by applying a convolutional neural network. +The +network architecture is composed of convolutional layers followed by batch normal- +izations and ReLU. The network is trained end-to-end using a dataset consisting of +noisy/clean image pairs. +• IRCNN [331] [29.15dB] Zhang, K., Zuo, W., Gu, S., & Zhang, L. (2017). Learning +deep CNN denoiser prior for image restoration. CVPR (pp. 3929-3938). 
+This method is similar to DnCNN, but uses dilated convolutions within the architec- +ture in order to enlarge the receptive field, thus creating an opportunity for a non-local + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +45 +processing. The network is trained end-to-end using a dataset consisting of noisy/clean +image pairs. +• NLRN [167] [29.41dB]: Liu, D., Wen, B., Fan, Y., Loy, C. C., & Huang, T. S. +(2018). Non-local recurrent network for image restoration. NeurIPS. +This method incorporates the non-local similarity concept into a convolutional recur- +rent neural network in an explicit way. The denoising is done by recurrently applying +convolutions and weighted averaging of similar regions (as in NLM [32]) in the feature +space. The network is trained end-to-end using a dataset consisting of noisy/clean +image pairs. +• MVCNN [168] [29.41dB]: Liu, P., Zhang, H., Zhang, K., Lin, L., & Zuo, W. (2018). +Multi-level wavelet-CNN for image restoration. CVPR Workshop (pp. 773-782). +This algorithm incorporates the wavelet sparsity concept into the deep learning ap- +proach by combining the U-Net architecture with the multi-level wavelet transform. It +replaces the downsampling and upsampling U-Net layers with the 2D discrete wavelet +transform and it’s inverse. The network is trained end-to-end using a dataset consist- +ing of noisy/clean image pairs. +• N3Net [218] [29.30dB]: Pl¨otz, T., & Roth, S. (2018). Neural nearest neighbors +networks. NeurIPS. +This method combines the deep learning approach with the non-local self-similarity +concept. +This method introduces a differentiable continuous relaxation of the k- +nearest neighbor (KNN) selection rule and uses it as a building block within the neural +network. N3Net’s architecture interleaves convolutional blocks with KNN relaxation +blocks. The convolutional blocks perform denoising, while the KNN parts augment +the feature maps by breaking them into patches, applying patch matching, and finding +k-nearest neighbors for each patch. The network is trained end-to-end using a dataset +consisting of noisy/clean image pairs. +• FFDNet [332] [29.19dB]: Zhang, K., Zuo, W., & Zhang, L. (2018). +FFDNet: +Toward a fast and flexible solution for CNN-based image denoising. IEEE Transactions +on Image Processing, 27(9), 4608-4622. +While the architecture of this deep learning method resembles DnCNN, it enlarges the +receptive field by reshaping the incoming image into four downsampled sub-images +that are simultaneously fed into the network. The network is trained end-to-end using +a dataset consisting of noisy/clean image pairs. +• FOCNet [133] [29.38dB] Jia, X., Liu, S., Feng, X., & Zhang, L. (2019). Focnet: A +fractional optimal control network for image denoising. CVPR (pp. 6054-6063). +This algorithm suggests a novel architecture to replace the one used by DnCNN, rely- +ing on an interpretation of residual neural networks as solvers of dynamical systems. +While DnCNN refers to integer-order ordinary differential equation, FOCNet’s archi- +tecture poses a fractional optimal control (FOC) problem that translates into better + +46 +M. ELAD, B. KAWAR AND G. VAKSMAN +connectivity. +The algorithm for solving the equation is implemented using a feed- +forward convolutional neural network whose parameters are learned using a dataset of +images. +• RIDNet [8] [29.34dB]: Anwar, S., & Barnes, N. (2019). Real image denoising with +feature attention. CVPR (pp. 3155-3164). 
+This algorithm introduces attention modules to a neural network whose architecture +includes convolutional layers and skip connections. This attention is designed to cap- +ture feature dependencies and enhance the weight of important correspondences. The +network is trained end-to-end using a dataset of clean/noisy image pairs. +• GCDN [292] [29.35dB]: Valsesia, D., Fracastoro, G., & Magli, E. (2020). Deep +graph-convolutional image denoising. IEEE Transactions on Image Processing, 29, +8226-8237. +This method combines the deep-learning approach with graph modeling. The GCDN +architecture includes convolutional and graph-convolutional layers. While regular con- +volutional layers catch local interrelations between pixels, the graph-convolution ones +are designed to capture the non-local dependencies. Each graph-convolutional layer +dynamically applies non-local aggregation (graph-convolution). +The graph is con- +structed via a k-nearest neighbor whose vertices are feature vectors. Each vertex is +connected to the k most similar ones in terms of the L2 norm. The network is trained +using a dataset of images. +• SwinIR [165] [29.50dB]: Liang, J., Cao, J., Sun, G., Zhang, K., Van Gool, L., & +Timofte, R. (2021). Swinir: Image restoration using swin transformer. CVPR (pp. +1833-1844). +This algorithm incorporates non-locality into convolutional deep learning architecture +using shifted window (Swin) transformer modules [171]. These modules are designed +to compute local self-attention in shifted windows, this way exploitig non-local self- +similarity. The SwinIR architecture is trained end-to-end using a dataset consisting +of noisy/clean image pairs. +• DRUNet [329] [29.48dB]: Zhang, K., Li, Y., Zuo, W., Zhang, L., Van Gool, L., & +Timofte, R. (2021). Plug-and-play image restoration with deep denoiser prior. IEEE +Transactions on Pattern Analysis and Machine Intelligence. +This denoiser is a bias-free [201] neural network that combines ResNet [117] and U- +Net [234]. Its architecture includes convolutions, downscaling and upscaling layers, +and skip connections. The network is trained using a dataset of images. +Appendix D. Approximation of the Score Function by an MMSE Denoiser. +In Section 7 we brought the definition of the score function, ∇x log p(x), and its approxi- +mation via a denoiser. Here we bring the derivation of this result, following the work by +Miyasawa [200], Stein [265], and Tweedie [84]. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +47 +Consider an ideal image x ∈ RN drawn from the Probability Density Function (PDF) p(x). +Assume that y is a noisy version of it, y = x + v, where v ∼ N(0, σ2 +0I). The PDF of y can +be obtained by a marginalization, +p(y) = +� +x +p(y|x)p(x)dx = +� +1 +2πσ2 +0 +�N/2 � +x +exp +� −1 +2σ2 +0 +∥y − x∥2 +2 +� +p(x)dx. +(D.1) +In the above we used the fact that p(y|x) ∼ N(x, σ2 +0I). The obtained relationship expresses +p(y) as a convolution between the original prior p(x) and an isotropic zero-mean Gaussian of +width σ0. Taking a derivative of both sides with respect to y results in the following: +∇yp(y) = +� +1 +2πσ2 +0 +�N/2 � +x +∇y exp +� −1 +2σ2 +0 +∥y − x∥2 +2 +� +p(x)dx +(D.2) += 1 +σ2 +0 +· +� +1 +2πσ2 +0 +�N/2 � +x +(y − x) exp +� −1 +2σ2 +0 +∥y − x∥2 +2 +� +p(x)dx += 1 +σ2 +0 +� +x +(y − x)p(y|x)p(x)dx. +Dividing both sides by p(y) leads to +∇yp(y) +p(y) += ∇y log p(y) = 1 +σ2 +0 +� +x +(x − y)p(y|x)p(x) +p(y) +dx +(D.3) += 1 +σ2 +0 +� +x +(x − y)p(x|y)dx. 
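Relation (D.3) already ties the score of p(y) to the posterior mean of x given y. In the scalar Gaussian case both quantities have closed forms, so the identity can be checked numerically; the sketch below uses illustrative values for the prior variance and for σ0.

```python
# Numerical sanity check of the score/denoiser relation in (D.3), for the
# scalar case x ~ N(0, tau2) and y = x + v with v ~ N(0, sigma0^2).
# The values of tau2 and sigma0 are illustrative choices.
import numpy as np

tau2, sigma0 = 2.0, 0.3
y = np.linspace(-3.0, 3.0, 7)

score = -y / (tau2 + sigma0**2)                 # grad_y log p(y), since y ~ N(0, tau2 + sigma0^2)
mmse_denoiser = tau2 / (tau2 + sigma0**2) * y   # E[x | y] for this Gaussian prior
rhs = (mmse_denoiser - y) / sigma0**2           # (D(y, sigma0) - y) / sigma0^2

assert np.allclose(score, rhs)                  # the two sides agree
```

The check passes for any choice of these parameters, since for a Gaussian prior E(x|y) is a linear shrinkage of y.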
+Opening and rearranging the above expression leads to our final result: +(D.4) +∇y log p(y) = 1 +σ2 +0 +�� +x +xp(x|y)dx − y +� +x +p(x|y)dx +� += 1 +σ2 +0 +[D(y, σ0) − y] , +where D(y, σ0) should be the optimal Minimum Mean Squared Error (MMSE) denoiser, +E(x|y). Thus, access to an approximation of the score function ∇x log p(x) can be obtained +by using a small value σ0, and evaluating the above expression with a given denoiser. + +48 +M. ELAD, B. KAWAR AND G. VAKSMAN +REFERENCES +[1] A. Abdelhamed, S. Lin, and M. S. Brown, A high-quality denoising dataset for smartphone cam- +eras, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, +pp. 1692–1700. +[2] A. Abdelhamed, R. Timofte, and M. S. Brown, Ntire 2019 challenge on real image denoising: +Methods and results, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern +Recognition Workshops, 2019. +[3] H. K. Aggarwal and A. Majumdar, Hyperspectral image denoising using spatio-spectral total varia- +tion, IEEE Geoscience and Remote Sensing Letters, 13 (2016), pp. 442–446. +[4] M. Aharon, M. Elad, and A. Bruckstein, K-SVD: An algorithm for designing overcomplete dictio- +naries for sparse representation, IEEE Transactions on signal processing, 54 (2006), pp. 4311–4322. +[5] R. Ahmad, C. A. Bouman, G. T. Buzzard, S. Chan, S. Liu, E. T. Reehorst, and P. Schniter, +Plug-and-play methods for magnetic resonance imaging: Using denoisers for image recovery, IEEE +Signal Processing Magazine, 37 (2020), pp. 105–116. +[6] T. Amit, E. Nachmani, T. Shaharbany, and L. Wolf, Segdiff: Image segmentation with diffusion +probabilistic models, arXiv preprint arXiv:2112.00390, (2021). +[7] F. J. Anscombe, The transformation of poisson, binomial and negative-binomial data, Biometrika, 35 +(1948), pp. 246–254. +[8] S. Anwar and N. Barnes, Real image denoising with feature attention, in Proceedings of the +IEEE/CVF international conference on computer vision, 2019, pp. 3155–3164. +[9] P. Arias and J.-M. Morel, Video denoising via empirical bayesian estimation of space-time patches, +Journal of Mathematical Imaging and Vision, 60 (2018), pp. 70–93. +[10] P. Arias and J.-M. Morel, Kalman filtering of patches for frame-recursive video denoising, in Pro- +ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, +2019, pp. 0–0. +[11] M. Arjovsky, S. Chintala, and L. Bottou, Wasserstein generative adversarial networks, in Proceed- +ings of the 34th International Conference on Machine Learning, vol. 70 of Proceedings of Machine +Learning Research, PMLR, 06–11 Aug 2017, pp. 214–223. +[12] L. Azzari and A. Foi, Variance stabilization for noisy+ estimate combination in iterative poisson +denoising, IEEE signal processing letters, 23 (2016), pp. 1086–1090. +[13] U. Bal, Dual tree complex wavelet transform based denoising of optical microscopy images, Biomedical +optics express, 3 (2012), pp. 3231–3239. +[14] Y. Balaji, S. Nah, X. Huang, A. Vahdat, J. Song, K. Kreis, M. Aittala, T. Aila, S. Laine, +B. Catanzaro, et al., eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers, +arXiv preprint arXiv:2211.01324, (2022). +[15] M. R. Banham and A. K. Katsaggelos, Digital image restoration, IEEE signal processing magazine, +14 (1997), pp. 24–41. +[16] J. Batson and L. Royer, Noise2self: Blind denoising by self-supervision, in International Conference +on Machine Learning, PMLR, 2019, pp. 524–533. +[17] A. Beck and M. 
Teboulle, Fast gradient-based algorithms for constrained total variation image de- +noising and deblurring problems, IEEE Transactions on image processing, 18 (2009), pp. 2419–2434. +[18] M. Bertalm´ıo, Denoising of photographic images and video: fundamentals, open challenges and new +trends, Springer, 2018. +[19] J. Besag, Markov chain monte carlo for statistical inference, Center for Statistics and the Social Sci- +ences, 9 (2001), pp. 24–25. +[20] M. J. Black, G. Sapiro, D. H. Marimont, and D. Heeger, Robust anisotropic diffusion, IEEE +Transactions on image processing, 7 (1998), pp. 421–432. +[21] T. Blau, R. Ganz, B. Kawar, A. Bronstein, and M. Elad, Threat model-agnostic adversarial +defense using diffusion models, arXiv preprint arXiv:2207.08089, (2022). +[22] Y. Blau and T. Michaeli, The perception-distortion tradeoff, in Proceedings of the IEEE Conference +on Computer Vision and Pattern Recognition, 2018, pp. 6228–6237. +[23] A. Bosco, R. Bruna, D. Giacalone, S. Battiato, and R. Rizzo, Signal-dependent raw image + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +49 +denoising using sensor noise characterization via multiple acquisitions, in Digital Photography VI, +vol. 7537, SPIE, 2010, pp. 34–43. +[24] L. Bottou et al., Stochastic gradient learning in neural networks, Proceedings of Neuro-Nımes, 91 +(1991), p. 12. +[25] J. Boulanger, C. Kervrann, P. Bouthemy, P. Elbau, J.-B. Sibarita, and J. Salamero, Patch- +based nonlocal functional for denoising fluorescence microscopy image sequences, IEEE transactions +on medical imaging, 29 (2009), pp. 442–454. +[26] C. Bouman and K. Sauer, A generalized gaussian image model for edge-preserving map estimation, +IEEE Transactions on image processing, 2 (1993), pp. 296–310. +[27] S. Boyd, N. Parikh, E. Chu, B. Peleato, J. Eckstein, et al., Distributed optimization and sta- +tistical learning via the alternating direction method of multipliers, Foundations and Trends® in +Machine learning, 3 (2011), pp. 1–122. +[28] A. Brifman, Y. Romano, and M. Elad, Turning a denoiser into a super-resolver using plug and play +priors, in 2016 IEEE International Conference on Image Processing (ICIP), IEEE, 2016, pp. 1404– +1408. +[29] A. Brock, J. Donahue, and K. Simonyan, Large scale GAN training for high fidelity natural image +synthesis, in International Conference on Learning Representations, 2018. +[30] T. Brooks, B. Mildenhall, T. Xue, J. Chen, D. Sharlet, and J. T. Barron, Unprocessing +images for learned raw denoising, in Proceedings of the IEEE/CVF Conference on Computer Vision +and Pattern Recognition, 2019, pp. 11036–11045. +[31] A. M. Bruckstein, D. L. Donoho, and M. Elad, From sparse solutions of systems of equations to +sparse modeling of signals and images, SIAM review, 51 (2009), pp. 34–81. +[32] A. Buades, B. Coll, and J.-M. Morel, A non-local algorithm for image denoising, in IEEE CVPR, +vol. 2, 2005, pp. 60–65. +[33] H. C. Burger, C. J. Schuler, and S. Harmeling, Image denoising: Can plain neural networks +compete with BM3D?, in IEEE CVPR, 2012, pp. 2392–2399. +[34] G. T. Buzzard, +S. H. Chan, +S. Sreehari, +and C. A. Bouman, Plug-and-play unplugged: +Optimization-free reconstruction using consensus equilibrium, SIAM Journal on Imaging Sciences, +11 (2018), pp. 2001–2020. +[35] E. J. Cand`es, X. Li, Y. Ma, and J. Wright, Robust principal component analysis?, Journal of the +ACM (JACM), 58 (2011), pp. 1–37. +[36] H. Cao, C. Tan, Z. Gao, G. Chen, P.-A. Heng, and S. Z. 
Li, A survey on generative diffusion +model, arXiv preprint arXiv:2209.02646, (2022). +[37] K. R. Castleman, Digital image processing, Prentice Hall Press, 1996. +[38] F. Catt´e, P.-L. Lions, J.-M. Morel, and T. Coll, Image selective smoothing and edge detection by +nonlinear diffusion, SIAM Journal on Numerical analysis, 29 (1992), pp. 182–193. +[39] A. Chambolle and T. Pock, A first-order primal-dual algorithm for convex problems with applications +to imaging, Journal of mathematical imaging and vision, 40 (2011), pp. 120–145. +[40] R. H. Chan, C.-W. Ho, and M. Nikolova, Salt-and-pepper noise removal by median-type noise +detectors and detail-preserving regularization, IEEE Transactions on image processing, 14 (2005), +pp. 1479–1485. +[41] S. H. Chan, Performance analysis of plug-and-play admm: A graph signal processing perspective, IEEE +Transactions on Computational Imaging, 5 (2019), pp. 274–286. +[42] S. H. Chan, X. Wang, and O. A. Elgendy, Plug-and-play ADMM for image restoration: Fixed-point +convergence and applications, IEEE Transactions on Computational Imaging, 3 (2016), pp. 84–98. +[43] S. G. Chang, B. Yu, and M. Vetterli, Adaptive wavelet thresholding for image denoising and +compression, IEEE transactions on image processing, 9 (2000), pp. 1532–1546. +[44] P. Charbonnier, L. Blanc-F´eraud, G. Aubert, and M. Barlaud, Deterministic edge-preserving +regularization in computed imaging, IEEE Transactions on image processing, 6 (1997), pp. 298–311. +[45] P. Chatterjee and P. Milanfar, Is denoising dead?, IEEE Transactions on Image Processing, 19 +(2009), pp. 895–911. +[46] P. Chatterjee and P. Milanfar, Patch-based near-optimal image denoising, IEEE Transactions on +Image Processing, 21 (2011), pp. 1635–1649. +[47] C. Chen, Z. Xiong, X. Tian, and F. Wu, Deep boosting for image denoising, in Proceedings of the + +50 +M. ELAD, B. KAWAR AND G. VAKSMAN +European Conference on Computer Vision (ECCV), 2018, pp. 3–18. +[48] H. Chen, Y. Zhang, W. Zhang, P. Liao, K. Li, J. Zhou, and G. Wang, Low-dose ct denoising with +convolutional neural network, in 2017 IEEE 14th International Symposium on Biomedical Imaging +(ISBI 2017), IEEE, 2017, pp. 143–146. +[49] W. Chen, D. Wipf, and M. Rodrigues, Deep learning for linear inverse problems using the plug-and- +play priors framework, in ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech +and Signal Processing (ICASSP), IEEE, 2021, pp. 8098–8102. +[50] Y. Chen and T. Pock, Trainable nonlinear reaction diffusion: A flexible framework for fast and +effective image restoration, IEEE transactions on pattern analysis and machine intelligence, 39 +(2016), pp. 1256–1272. +[51] B. T. Christian, N. T. Vandehey, J. M. Floberg, and C. A. Mistretta, Dynamic pet denoising +with hypr processing, Journal of Nuclear Medicine, 51 (2010), pp. 1147–1154. +[52] C. Chu, I. Glad, F. Godtliebsen, and J. Marron, Edge-preserving smoothers for image processing, +Journal of the American Statistical Association, 93 (1998), pp. 526–541. +[53] H. Chung, J. Kim, M. T. Mccann, M. L. Klasky, and J. C. Ye, Diffusion posterior sampling for +general noisy inverse problems, arXiv preprint arXiv:2209.14687, (2022). +[54] H. Chung, B. Sim, D. Ryu, and J. C. Ye, Improving diffusion models for inverse problems using +manifold constraints, arXiv preprint arXiv:2206.00941, (2022). +[55] R. Cohen, Y. Blau, D. Freedman, and E. 
Rivlin, It has potential: Gradient-driven denoisers for +convergent solutions to inverse problems, Advances in Neural Information Processing Systems, 34 +(2021), pp. 18152–18164. +[56] R. Cohen, M. Elad, and P. Milanfar, Regularization by denoising via fixed-point projection (RED- +PRO), SIAM Journal on Imaging Sciences, 14 (2021), pp. 1374–1406. +[57] R. R. Coifman and D. L. Donoho, Translation-invariant de-noising, in Wavelets and statistics, +Springer, 1995, pp. 125–150. +[58] M.-C. Corbineau, C. Bertocchi, E. Chouzenoux, M. Prato, and J.-C. Pesquet, Learned image +deblurring by unfolding a proximal interior point algorithm, in 2019 IEEE International Conference +on Image Processing (ICIP), IEEE, 2019, pp. 4664–4668. +[59] R. Costantini and S. Susstrunk, Virtual sensor design, in Sensors and Camera Systems for Scientific, +Industrial, and Digital Photography Applications V, vol. 5301, SPIE, 2004, pp. 408–419. +[60] F.-A. Croitoru, V. Hondru, R. T. Ionescu, and M. Shah, Diffusion models in vision: A survey, +arXiv preprint arXiv:2209.04747, (2022). +[61] K. Dabov, A. Foi, V. Katkovnik, and K. Egiazarian, Image denoising by sparse 3-D transform- +domain collaborative filtering, IEEE Transactions on image processing, 16 (2007), pp. 2080–2095. +[62] J. Dai, O. C. Au, C. Pang, W. Yang, and F. Zou, Film grain noise removal and synthesis in video +coding, in 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, IEEE, +2010, pp. 890–893. +[63] Y. Dar, A. M. Bruckstein, M. Elad, and R. Giryes, Postprocessing of compressed images via +sequential denoising, IEEE Transactions on Image Processing, 25 (2016), pp. 3044–3058. +[64] P. Das, C. Pal, A. Chakrabarti, A. Acharyya, and S. Basu, Adaptive denoising of 3d volumetric +mr images using local variance based estimator, Biomedical Signal Processing and Control, 59 (2020), +p. 101901. +[65] C.-A. Deledalle, F. Tupin, and L. Denis, Poisson nl means: Unsupervised non local means for +poisson noise, in 2010 IEEE international conference on image processing, IEEE, 2010, pp. 801–804. +[66] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei, ImageNet: A large-scale hierarchical +image database, in 2009 IEEE Conference on Computer Vision and Pattern Recognition, 2009, +pp. 248–255. +[67] R. Dey, D. Bhattacharjee, and M. Nasipuri, Image denoising using generative adversarial network, +in Intelligent Computing: Image Processing Based Applications, Springer, 2020, pp. 73–90. +[68] P. Dhariwal and A. Q. Nichol, Diffusion models beat GANs on image synthesis, in Thirty-Fifth +Conference on Neural Information Processing Systems, 2021. +[69] N. Divakar and R. Venkatesh Babu, Image denoising via CNNs: An adversarial approach, in +Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, +2017, pp. 80–87. + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +51 +[70] M. Diwakar, P. Kumar, and A. K. Singh, Ct image denoising using nlm and its method noise +thresholding, Multimedia Tools and Applications, 79 (2020), pp. 14449–14464. +[71] W. Dong, X. Li, L. Zhang, and G. Shi, Sparsity-based image denoising via dictionary learning and +structural clustering, in CVPR 2011, 2011, pp. 457–464. +[72] W. Dong, G. Shi, Y. Ma, and X. Li, Image restoration via simultaneous sparse coding: Where +structured sparsity meets Gaussian scale mixture, International Journal of Computer Vision, 114 +(2015), pp. 217–232. +[73] W. Dong, P. Wang, W. Yin, G. Shi, F. Wu, and X. 
Lu, Denoising prior driven deep neural network +for image restoration, IEEE Transactions on Pattern Analysis and Machine Intelligence, 41 (2018), +pp. 2305–2318. +[74] W. Dong, L. Zhang, G. Shi, and X. Li, Nonlocally centralized sparse representation for image restora- +tion, IEEE Transactions on Image Processing, 22 (2012), pp. 1620–1630. +[75] Y. Dong and S. Xu, A new directional weighted median filter for removal of random-valued impulse +noise, IEEE Signal Processing Letters, 14 (2007), pp. 193–196. +[76] D. L. Donoho, De-noising by soft-thresholding, IEEE transactions on information theory, 41 (1995), +pp. 613–627. +[77] D. L. Donoho and J. M. Johnstone, Ideal spatial adaptation by wavelet shrinkage, biometrika, 81 +(1994), pp. 425–455. +[78] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. De- +hghani, M. Minderer, G. Heigold, S. Gelly, et al., An image is worth 16x16 words: Trans- +formers for image recognition at scale, in International Conference on Learning Representations, +2020. +[79] Y. Du and I. Mordatch, Implicit generation and modeling with energy based models, in Advances in +Neural Information Processing Systems, vol. 32, 2019. +[80] A. Dudhane, S. W. Zamir, S. Khan, F. S. Khan, and M.-H. Yang, Burst image restoration +and enhancement, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern +Recognition, 2022, pp. 5759–5768. +[81] F.-X. Dup´e, J. M. Fadili, and J.-L. Starck, A proximal iteration for deconvolving poisson noisy +images using sparse representations, IEEE Transactions on Image Processing, 18 (2009), pp. 310– +321. +[82] J. Dutta, R. M. Leahy, and Q. Li, Non-local means denoising of dynamic pet images, PloS one, 8 +(2013), p. e81390. +[83] S. Dutta, A. Basarab, B. Georgeot, and D. Kouam´e, Deep unfolding of image denoising by +quantum interactive patches, in 2022 IEEE International Conference on Image Processing (ICIP), +IEEE, 2022, pp. 491–495. +[84] B. Efron, Tweedie’s formula and selection bias, Journal of the American Statistical Association, 106 +(2011), pp. 1602–1614. +[85] K. Egiazarian and V. Katkovnik, Single image super-resolution via BM3D sparse coding, in IEEE +European Signal Processing Conference (EUSIPCO), 2015, pp. 2849–2853. +[86] T. Ehret, A. Davy, P. Arias, and G. Facciolo, Joint demosaicking and denoising by fine-tuning +of bursts of raw images, in Proceedings of the IEEE/CVF International Conference on Computer +Vision, 2019, pp. 8868–8877. +[87] M. Elad, On the origin of the bilateral filter and ways to improve it, IEEE Transactions on image +processing, 11 (2002), pp. 1141–1151. +[88] M. Elad, Sparse and redundant representations: from theory to applications in signal and image pro- +cessing, vol. 2, Springer, 2010. +[89] M. Elad and M. Aharon, Image denoising via sparse and redundant representations over learned +dictionaries, IEEE Transactions on Image processing, 15 (2006), pp. 3736–3745. +[90] M. Elad, J.-L. Starck, P. Querre, and D. L. Donoho, Simultaneous cartoon and texture image +inpainting using morphological component analysis (mca), Applied and Computational Harmonic +Analysis, 19 (2005), pp. 340–358. +[91] M. J. Fadili, J.-L. Starck, J. Bobin, and Y. Moudden, Image decomposition and separation using +sparse representations: an overview, Proceedings of the IEEE, 98 (2009), pp. 983–994. +[92] L. Fan, F. Zhang, H. Fan, and C. Zhang, Brief review of image denoising techniques, Visual Com- + +52 +M. ELAD, B. KAWAR AND G. 
VAKSMAN +puting for Industry, Biomedicine, and Art, 2 (2019), pp. 1–12. +[93] M. A. Figueiredo and J. M. Bioucas-Dias, Restoration of poissonian images using alternating di- +rection optimization, IEEE transactions on Image Processing, 19 (2010), pp. 3133–3145. +[94] A. K. Fletcher, P. Pandit, S. Rangan, S. Sarkar, and P. Schniter, Plug-in estimation in +high-dimensional linear inverse problems: A rigorous analysis, in Advances in Neural Information +Processing Systems, 2018, pp. 7440–7449. +[95] R. Gal, Y. Alaluf, Y. Atzmon, O. Patashnik, A. H. Bermano, G. Chechik, and D. Cohen-Or, +An image is worth one word: Personalizing text-to-image generation using textual inversion, arXiv +preprint arXiv:2208.01618, (2022). +[96] D. Geman and G. Reynolds, Constrained restoration and the recovery of discontinuities, IEEE Trans- +actions on pattern analysis and machine intelligence, 14 (1992), pp. 367–383. +[97] S. Geman, Stochastic relaxation, gibbs distributions and bayesian restoration of images. ieee trans, +Pattn. Anal. Mach. Intell., 6 (1984), pp. 721–741. +[98] S. Ghael, A. M. Sayeed, and R. G. Baraniuk, Improved wavelet denoising via empirical wiener +filtering, in SPIE Technical Conference on Wavelet Applications in Signal Processing, 1997. +[99] G. Gilboa and S. Osher, Nonlocal operators with applications to image processing, Multiscale Modeling +& Simulation, 7 (2009), pp. 1005–1028. +[100] R. Giryes and M. Elad, Sparsity-based poisson denoising with dictionary learning, IEEE Transactions +on Image Processing, 23 (2014), pp. 5057–5069. +[101] X. Glorot, A. Bordes, and Y. Bengio, Deep sparse rectifier neural networks, in Proceedings of +the fourteenth international conference on artificial intelligence and statistics, JMLR Workshop and +Conference Proceedings, 2011, pp. 315–323. +[102] C. Godard, K. Matzen, and M. Uyttendaele, Deep burst denoising, in Proceedings of the European +conference on computer vision (ECCV), 2018, pp. 538–554. +[103] Y. E. G¨okda˘g, F. S¸ansal, and Y. D. G¨okdel, Image denoising using 2-d wavelet algorithm for +gaussian-corrupted confocal microscopy images, Biomedical Signal Processing and Control, 54 (2019), +p. 101594. +[104] G. H. Golub, P. C. Hansen, and D. P. O’Leary, Tikhonov regularization and total least squares, +SIAM Journal on Matrix Analysis and Applications, 21 (1999), pp. 185–194. +[105] K. Gong, J. Guan, C.-C. Liu, and J. Qi, Pet image denoising using a deep neural network through +fine tuning, IEEE Transactions on Radiation and Plasma Medical Sciences, 3 (2018), pp. 153–161. +[106] M. Gonzalez, J. Preciozzi, P. Mus´e, and A. Almansa, Joint denoising and decompression using cnn +regularization, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition +Workshops, 2018, pp. 2598–2601. +[107] I. +Goodfellow, +J. +Pouget-Abadie, +M. +Mirza, +B. +Xu, +D. +Warde-Farley, +S. +Ozair, +A. Courville, and Y. Bengio, Generative adversarial nets, Advances in Neural Information Pro- +cessing Systems, 27 (2014). +[108] B. Goossens, A. Pizurica, and W. Philips, Removal of correlated noise by modeling the signal of +interest in the wavelet domain, IEEE transactions on image processing, 18 (2009), pp. 1153–1165. +[109] K. Gregor and Y. LeCun, Learning fast approximations of sparse coding, in Proceedings of the 27th +international conference on international conference on machine learning, 2010, pp. 399–406. +[110] S. Gu, L. Zhang, W. Zuo, and X. 
Feng, Weighted nuclear norm minimization with application to +image denoising, in Proceedings of the IEEE conference on computer vision and pattern recognition, +2014, pp. 2862–2869. +[111] F. Guichard, L. Moisan, and J. M. Morel, A review of pde models in image processing and image +analysis, In Journal de Physique IV, 12 (2002), pp. 137–154. +[112] I. Gulrajani, F. Ahmed, M. Arjovsky, V. Dumoulin, and A. C. Courville, Improved training of +Wasserstein GANs, Advances in neural information processing systems, 30 (2017). +[113] S. Guo, Z. Liang, and L. Zhang, Joint denoising and demosaicking with green channel prior for +real-world burst images, IEEE Transactions on Image Processing, 30 (2021), pp. 6930–6942. +[114] S. Guo, Z. Yan, K. Zhang, W. Zuo, and L. Zhang, Toward convolutional blind denoising of real +photographs, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, +2019, pp. 1712–1722. +[115] J. Gurrola-Ramos, O. Dalmau, and T. E. Alarc´on, A residual dense u-net neural network for + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +53 +image denoising, IEEE Access, 9 (2021), pp. 31742–31754. +[116] X. Han, H. Zheng, and M. Zhou, CARD: Classification and regression diffusion models, arXiv preprint +arXiv:2206.07275, (2022). +[117] K. He, X. Zhang, S. Ren, and J. Sun, Deep residual learning for image recognition, in Proceedings +of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770–778. +[118] G. E. Hinton, Training products of experts by minimizing contrastive divergence, Neural computation, +14 (2002), pp. 1771–1800. +[119] J. Ho, W. Chan, C. Saharia, J. Whang, R. Gao, A. Gritsenko, D. P. Kingma, B. Poole, +M. Norouzi, D. J. Fleet, et al., Imagen video: High definition video generation with diffusion +models, arXiv preprint arXiv:2210.02303, (2022). +[120] J. Ho, A. Jain, and P. Abbeel, Denoising diffusion probabilistic models, in Advances in Neural +Information Processing Systems, vol. 33, Curran Associates, Inc., 2020, pp. 6840–6851. +[121] J. Ho, C. Saharia, W. Chan, D. J. Fleet, M. Norouzi, and T. Salimans, Cascaded diffusion +models for high fidelity image generation, Journal of Machine Learning Research, 23 (2022), pp. 1– +33. +[122] J. Ho and T. Salimans, Classifier-free diffusion guidance, in NeurIPS 2021 Workshop on Deep Gen- +erative Models and Downstream Applications, 2021. +[123] T. Hong, Y. Romano, and M. Elad, Acceleration of RED via vector extrapolation, Journal of Visual +Communication and Image Representation, 63 (2019), p. 102575. +[124] X. Hu, R. Ma, Z. Liu, Y. Cai, X. Zhao, Y. Zhang, and H. Wang, Pseudo 3d auto-correlation +network for real image denoising, in Proceedings of the IEEE/CVF Conference on Computer Vision +and Pattern Recognition, 2021, pp. 16175–16184. +[125] Y. Huang, S. Li, L. Wang, T. Tan, et al., Unfolding the alternating optimization for blind super +resolution, Advances in Neural Information Processing Systems, 33 (2020), pp. 5632–5643. +[126] P. Huber, Robust statistics. new-york: John wiley and sons, HuberRobust statistics1981, (1981). +[127] I. A. Ibragimov, I. V. Linnik, and J. F. C. Kingman, Independent and stationary sequences of +random variables, Monographs and textbooks on pure and applied mathematics, Wolters-Noordhoff., +1971. +[128] A. Ignatov, K. Byeoung-Su, R. Timofte, and A. 
Pouget, Fast camera image denoising on mo- +bile gpus with deep learning, mobile ai 2021 challenge: Report, in Proceedings of the IEEE/CVF +Conference on Computer Vision and Pattern Recognition, 2021, pp. 2515–2524. +[129] A. Jain, A. Xie, and P. Abbeel, Vectorfusion: Text-to-svg by abstracting pixel-based diffusion models, +arXiv preprint arXiv:2211.11319, (2022). +[130] P. Jain and V. Tyagi, A survey of edge-preserving image denoising methods, Information Systems +Frontiers, 18 (2016), pp. 159–170. +[131] V. Jain and S. Seung, Natural image denoising with convolutional networks, Advances in neural +information processing systems, 21 (2008). +[132] E. T. Jaynes, Probability theory: The logic of science, Cambridge university press, 2003. +[133] X. Jia, S. Liu, X. Feng, and L. Zhang, Focnet: A fractional optimal control network for image +denoising, 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), +(2019), pp. 6047–6056. +[134] K. H. Jin and J. C. Ye, Annihilating filter-based low-rank hankel matrix approach for image inpainting, +IEEE Transactions on Image Processing, 24 (2015), pp. 3498–3511. +[135] A. Jolicoeur-Martineau, K. Li, R. Pich´e-Taillefer, T. Kachman, and I. Mitliagkas, Gotta go +fast when generating data with score-based models, arXiv preprint arXiv:2105.14080, (2021). +[136] I. T. Jolliffe and J. Cadima, Principal component analysis: a review and recent developments, +Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences, +374 (2016), p. 20150202. +[137] J. Joyce and E. N. Zalta, Bayes’ theorem, The Stanford Encyclopedia of Philosophy, 28 (2003). +[138] Z. Kadkhodaie and E. Simoncelli, Stochastic solutions for linear inverse problems using the prior +implicit in a denoiser, Advances in Neural Information Processing Systems, 34 (2021), pp. 13242– +13254. +[139] U. S. Kamilov, H. Mansour, and B. Wohlberg, A plug-and-play priors approach for solving non- +linear imaging inverse problems, IEEE Signal Processing Letters, 24 (2017), pp. 1872–1876. + +54 +M. ELAD, B. KAWAR AND G. VAKSMAN +[140] K. Karhunen, ¨Uber lineare methoden in der wahrscheinlichkeitsrechnung, Ann. Acad. Sci. Fennicea, +A137 (1947). +[141] T. Karras, S. Laine, M. Aittala, J. Hellsten, J. Lehtinen, and T. Aila, Analyzing and improving +the image quality of stylegan, in Proceedings of the IEEE/CVF Conference on Computer Vision and +Pattern Recognition, 2020, pp. 8110–8119. +[142] B. Kawar, M. Elad, S. Ermon, and J. Song, Denoising diffusion restoration models, in Advances +in Neural Information Processing Systems, 2022. +[143] B. Kawar, R. Ganz, and M. Elad, Enhancing diffusion-based image synthesis with robust classifier +guidance, arXiv preprint arXiv:2208.08664, (2022). +[144] B. Kawar, J. Song, S. Ermon, and M. Elad, JPEG artifact correction using denoising diffusion +restoration models, in Neural Information Processing Systems (NeurIPS) Workshop on Score-Based +Methods, 2022. +[145] B. Kawar, G. Vaksman, and M. Elad, SNIPS: solving noisy inverse problems stochastically, Advances +in Neural Information Processing Systems, 34 (2021), pp. 21757–21769. +[146] B. Kawar, G. Vaksman, and M. Elad, Stochastic image denoising by sampling from the posterior +distribution, in Proceedings of the IEEE/CVF International Conference on Computer Vision Work- +shops, 2021, pp. 1866–1875. +[147] B. Kawar, S. Zada, O. Lang, O. Tov, H. Chang, T. Dekel, I. Mosseri, and M. 
Irani, Imagic: +Text-based real image editing with diffusion models, arXiv preprint arXiv:2210.09276, (2022). +[148] S. A. Khowaja, B. N. Yahya, and S.-L. Lee, Cascaded and recursive convnets (crcnn): An effective +and flexible approach for image denoising, Signal Processing: Image Communication, 99 (2021), +p. 116420. +[149] S.-U. Kim, An image denoising algorithm for the mobile phone cameras, The Journal of the Korea +institute of electronic communication sciences, 9 (2014), pp. 601–608. +[150] D. P. Kingma and P. Dhariwal, Glow: Generative flow with invertible 1x1 convolutions, Advances +in neural information processing systems, 31 (2018). +[151] D. P. Kingma and M. Welling, Auto-encoding variational bayes, in International Conference on +Learning Representations, 2014. +[152] A. Krull, T.-O. Buchholz, and F. Jug, Noise2void-learning denoising from single noisy images, +in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, +pp. 2129–2137. +[153] G. Kutyniok and W.-Q. Lim, Image separation using wavelets and shearlets, in International Confer- +ence on Curves and Surfaces, Springer, 2010, pp. 416–430. +[154] R. L. Lagendijk and J. Biemond, Basic methods for image restoration and identification, in The +Essential Guide to Image Processing, Elsevier, 2009, pp. 323–348. +[155] R. Laumont, V. De Bortoli, A. Almansa, J. Delon, A. Durmus, and M. Pereyra, Bayesian +imaging using plug & play priors: when Langevin meets Tweedie, arXiv preprint arXiv:2103.04715, +(2021). +[156] M. Lebrun, M. Colom, A. Buades, and J.-M. Morel, Secrets of image denoising cuisine, Acta +Numerica, 21 (2012), pp. 475–576. +[157] M. Lebrun, M. Colom, and J.-M. Morel, The noise clinic: A universal blind denoising algorithm, +in 2014 IEEE International Conference on Image Processing (ICIP), IEEE, 2014, pp. 2674–2678. +[158] S. Lee, M. Negishi, H. Urakubo, H. Kasai, and S. Ishii, Mu-net: Multi-scale u-net for two-photon +microscopy image denoising and restoration, Neural Networks, 125 (2020), pp. 92–103. +[159] S. Lefkimmiatis, Universal denoising networks: a novel cnn architecture for image denoising, in Pro- +ceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 3204–3213. +[160] J. Lehtinen, J. Munkberg, J. Hasselgren, S. Laine, T. Karras, M. Aittala, and T. Aila, +Noise2noise: Learning image restoration without clean data, in International Conference on Machine +Learning, PMLR, 2018, pp. 2965–2974. +[161] C. Lei, Y. Xing, and Q. Chen, Blind video temporal consistency via deep video prior, ArXiv, +abs/2010.11838 (2020). +[162] A. Levin and B. Nadler, Natural image denoising: Optimality and inherent bounds, in IEEE CVPR, +2011, pp. 2833–2840. +[163] A. Levin, B. Nadler, F. Durand, and W. T. Freeman, Patch complexity, finite pixel correlations + +IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND +55 +and optimal denoising, in European Conference on Computer Vision, Springer, 2012, pp. 73–86. +[164] Z. Li, L. Yu, J. D. Trzasko, D. S. Lake, D. J. Blezek, J. G. Fletcher, C. H. McCollough, and +A. Manduca, Adaptive nonlocal means filtering based on local noise level for ct denoising, Medical +physics, 41 (2014), p. 011908. +[165] J. Liang, J. Cao, G. Sun, K. Zhang, L. Van Gool, and R. Timofte, SwinIR: Image restoration +using swin transformer, in Proceedings of the IEEE/CVF International Conference on Computer +Vision, 2021, pp. 1833–1844. +[166] Z. Liang, S. Guo, H. Gu, H. Zhang, and L. 
Zhang, A decoupled learning scheme for real-world +burst denoising from raw images, in European Conference on Computer Vision, Springer, 2020, +pp. 150–166. +[167] D. Liu, B. Wen, Y. Fan, C. C. Loy, and T. S. Huang, Non-local recurrent network for image +restoration, in Advances in Neural Information Processing Systems, 2018, pp. 1673–1682. +[168] P. Liu, H. Zhang, K. Zhang, L. Lin, and W. Zuo, Multi-level wavelet-cnn for image restoration, +2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), +(2018), pp. 886–88609. +[169] X. Liu, M. Tanaka, and M. Okutomi, Single-image noise level estimation for blind denoising, IEEE +transactions on image processing, 22 (2013), pp. 5226–5237. +[170] Y. Liu, Z. Qin, S. Anwar, P. Ji, D. Kim, S. Caldwell, and T. Gedeon, Invertible denoising +network: A light solution for real noise removal, in Proceedings of the IEEE/CVF conference on +computer vision and pattern recognition, 2021, pp. 13365–13374. +[171] Z. Liu, Y. Lin, Y. Cao, H. Hu, Y. Wei, Z. Zhang, S. Lin, and B. Guo, Swin transformer: +Hierarchical vision transformer using shifted windows, 2021 IEEE/CVF International Conference +on Computer Vision (ICCV), (2021), pp. 9992–10002. +[172] Z. Liu, P. Luo, X. Wang, and X. Tang, Deep learning face attributes in the wild, in Proceedings of +the IEEE International Conference on Computer Vision, 2015, pp. 3730–3738. +[173] Z. Liu, L. Yuan, X. Tang, M. Uyttendaele, and J. Sun, Fast burst images denoising, ACM +Transactions on Graphics (TOG), 33 (2014), pp. 1–9. +[174] M. Lo´eve, Fonctions al´eatoires de second order, CR. Acad. Sci. Paris, 220 (1945). +[175] F. Luisier, T. Blu, and M. Unser, A new sure approach to image denoising: Interscale orthonormal +wavelet thresholding, IEEE Transactions on image processing, 16 (2007), pp. 593–606. +[176] F. Luisier, T. Blu, and M. Unser, Image denoising in mixed poisson–gaussian noise, IEEE Trans- +actions on image processing, 20 (2010), pp. 696–708. +[177] F. Luisier, C. Vonesch, T. Blu, and M. Unser, Fast interscale wavelet denoising of poisson-corrupted +images, Signal processing, 90 (2010), pp. 415–427. +[178] S. Luo and W. Hu, Score-based point cloud denoising, in Proceedings of the IEEE/CVF International +Conference on Computer Vision (ICCV), October 2021, pp. 4583–4592. +[179] M. Maggioni, G. Boracchi, A. Foi, and K. Egiazarian, Video denoising using separable 4d nonlocal +spatiotemporal transforms, in Image Processing: Algorithms and Systems IX, vol. 7870, International +Society for Optics and Photonics, 2011, p. 787003. +[180] M. Maggioni, Y. Huang, C. Li, S. Xiao, Z. Fu, and F. Song, Efficient multi-stage video denoising +with recurrent spatio-temporal fusion, 2021 IEEE/CVF Conference on Computer Vision and Pattern +Recognition (CVPR), (2021), pp. 3465–3474. +[181] J. Mairal, F. Bach, J. Ponce, G. Sapiro, and A. Zisserman, Non-local sparse models for image +restoration, in IEEE 12th international conference on computer vision, 2009, pp. 2272–2279. +[182] J. Mairal, M. Elad, and G. Sapiro, Sparse representation for color image restoration, IEEE Trans- +actions on Image Processing, 17 (2008), pp. 53–69. +[183] J. Mairal, G. Sapiro, and M. Elad, Multiscale sparse image representation with learned dictionaries, +in 2007 IEEE International Conference on Image Processing, vol. 3, IEEE, 1997, pp. III–105. +[184] M. Makitalo and A. Foi, Optimal inversion of the anscombe transformation in low-count poisson +image denoising, IEEE transactions on Image Processing, 20 (2010), pp. 99–109. 
[185] S. Mallat, A wavelet tour of signal processing, Elsevier, 1999.
[186] B. Manifold, E. Thomas, A. T. Francis, A. H. Hill, and D. Fu, Denoising of stimulated raman scattering microscopy images via deep learning, Biomedical optics express, 10 (2019), pp. 3860–3874.
[187] J. V. Manjón, P. Coupé, L. Martí-Bonmatí, D. L. Collins, and M. Robles, Adaptive non-local means denoising of mr images with spatially varying noise levels, Journal of Magnetic Resonance Imaging, 31 (2010), pp. 192–203.
[188] V. Mannam, Y. Zhang, Y. Zhu, E. Nichols, Q. Wang, V. Sundaresan, S. Zhang, C. Smith, P. W. Bohn, and S. S. Howard, Real-time image denoising of mixed poisson–gaussian noise in fluorescence microscopy images using imagej, Optica, 9 (2022), pp. 335–345.
[189] T. Marinč, V. Srinivasan, S. Gül, C. Hellge, and W. Samek, Multi-kernel prediction networks for denoising of burst images, in 2019 IEEE International Conference on Image Processing (ICIP), IEEE, 2019, pp. 2404–2408.
[190] D. R. Martin, C. C. Fowlkes, D. Tal, and J. Malik, A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics, Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, 2 (2001), pp. 416–423 vol.2.
[191] E. Martinec, Noise, dynamic range and bit depth in digital slrs, The University of Chicago, (2008).
[192] G. Mataev, P. Milanfar, and M. Elad, DeepRED: Deep image prior powered by RED, in Proceedings of the IEEE International Conference on Computer Vision Workshops, 2019, pp. 0–0.
[193] M. Matrecano, G. Poggi, and L. Verdoliva, Improved bm3d for correlated noise removal, in VISAPP (1), 2012, pp. 129–134.
[194] T. Meinhardt, M. Moller, C. Hazirbas, and D. Cremers, Learning proximal operators: Using denoising networks for regularizing inverse imaging problems, in Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 1781–1790.
[195] X. Meng and Y. Kabashima, Diffusion model based posterior sampling for noisy linear inverse problems, arXiv preprint arXiv:2211.12343, (2022).
[196] C. A. Metzler, A. Maleki, and R. G. Baraniuk, From denoising to compressed sensing, IEEE Transactions on Information Theory, 62 (2016), pp. 5117–5144.
[197] P. Milanfar, A tour of modern image filtering: New insights and methods, both practical and theoretical, IEEE signal processing magazine, 30 (2012), pp. 106–128.
[198] B. Mildenhall, J. T. Barron, J. Chen, D. Sharlet, R. Ng, and R. Carroll, Burst denoising with kernel prediction networks, in Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 2502–2510.
[199] J. Miskin and D. J. MacKay, Ensemble learning for blind image separation and deconvolution, in Advances in independent component analysis, Springer, 2000, pp. 123–141.
[200] K. Miyasawa, An empirical Bayes estimator of the mean of a normal population, Bull. Inst. Internat. Statist., 38 (1961), pp. 181–188.
[201] S. Mohan, Z. Kadkhodaie, E. P. Simoncelli, and C. Fernandez-Granda, Robust and interpretable blind image denoising via bias-free convolutional neural networks, in International Conference on Learning Representations, 2019.
[202] R. Mokady, A. Hertz, K. Aberman, Y. Pritch, and D. Cohen-Or, Null-text inversion for editing real images using guided diffusion models, arXiv preprint arXiv:2211.09794, (2022).
[203] I. Mosseri, M. Zontak, and M. Irani, Combining the power of internal and external denoising, in IEEE international conference on computational photography (ICCP), IEEE, 2013, pp. 1–9.
[204] C. Mou, Q. Wang, and J. Zhang, Deep generalized unfolding networks for image restoration, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 17399–17410.
[205] K. P. Murphy, Machine learning: a probabilistic perspective, MIT press, 2012.
[206] S. Neville and N. Dimopoulos, Wavelet denoising of coarsely quantized signals, IEEE Transactions on Instrumentation and Measurement, 55 (2006), pp. 892–901.
[207] H. V. Nguyen, M. O. Ulfarsson, and J. R. Sveinsson, Hyperspectral image denoising using sure-based unsupervised convolutional neural networks, IEEE Transactions on Geoscience and Remote Sensing, 59 (2020), pp. 3369–3382.
[208] A. Q. Nichol and P. Dhariwal, Improved denoising diffusion probabilistic models, in International Conference on Machine Learning, PMLR, 2021, pp. 8162–8171.
[209] W. Nie, B. Guo, Y. Huang, C. Xiao, A. Vahdat, and A. Anandkumar, Diffusion models for adversarial purification, in International Conference on Machine Learning (ICML), 2022.
[210] M. Nikolova, A variational approach to remove outliers and impulse noise, Journal of Mathematical Imaging and Vision, 20 (2004), pp. 99–120.
[211] G. Ohayon, T. Adrai, M. Elad, and T. Michaeli, Reasons for the superiority of stochastic estimators over deterministic ones: Robustness, consistency and perceptual quality, arXiv preprint arXiv:2211.08944, (2022).
[212] G. Ohayon, T. Adrai, G. Vaksman, M. Elad, and P. Milanfar, High perceptual quality image denoising with a posterior sampling CGAN, in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 1805–1813.
[213] Z. Pan, X. Zhou, and H. Tian, Extreme generative image compression by learning text embedding from diffusion models, arXiv preprint arXiv:2211.07793, (2022).
[214] Y. C. Pati, R. Rezaiifar, and P. S. Krishnaprasad, Orthogonal matching pursuit: Recursive function approximation with applications to wavelet decomposition, in Proceedings of 27th Asilomar conference on signals, systems and computers, IEEE, 1993, pp. 40–44.
[215] K. Pearson, On lines and planes of closest fit to systems of points in space, The London, Edinburgh, and Dublin philosophical magazine and journal of science, 2 (1901), pp. 559–572.
[216] P. Perona and J. Malik, Scale-space and edge detection using anisotropic diffusion, IEEE Transactions on pattern analysis and machine intelligence, 12 (1990), pp. 629–639.
[217] H. Pishro-Nik, Introduction to probability, statistics and random processes, (2014).
[218] T. Plötz and S. Roth, Neural nearest neighbors networks, in Neural Information Processing Systems, 2018.
[219] N. N. Ponomarenko, V. V. Lukin, A. A. Zelensky, J. T. Astola, and K. O. Egiazarian, Adaptive dct-based filtering of images corrupted by spatially correlated noise, in Image processing: algorithms and systems VI, vol. 6812, SPIE, 2008, pp. 285–295.
[220] B. Poole, A. Jain, J. T. Barron, and B. Mildenhall, Dreamfusion: Text-to-3d using 2d diffusion, arXiv preprint arXiv:2209.14988, (2022).
[221] J. Portilla, V. Strela, M. J. Wainwright, and E. P. Simoncelli, Image denoising using scale mixtures of gaussians in the wavelet domain, IEEE Transactions on Image processing, 12 (2003), pp. 1338–1351.
[222] A. Radford, L. Metz, and S. Chintala, Unsupervised representation learning with deep convolutional generative adversarial networks, in 4th International Conference on Learning Representations, ICLR, 2016.
[223] J. Rajan, K. Kannan, and M. Kaimal, An improved hybrid model for molecular image denoising, Journal of Mathematical Imaging and Vision, 31 (2008), pp. 73–79.
[224] A. Ramesh, P. Dhariwal, A. Nichol, C. Chu, and M. Chen, Hierarchical text-conditional image generation with clip latents, arXiv preprint arXiv:2204.06125, (2022).
[225] E. T. Reehorst and P. Schniter, Regularization by denoising: Clarifications and new interpretations, IEEE Transactions on computational imaging, 5 (2018), pp. 52–67.
[226] T. Remez, O. Litany, R. Giryes, and A. M. Bronstein, Class-aware fully convolutional gaussian and poisson denoising, IEEE Transactions on Image Processing, 27 (2018), pp. 5707–5722.
[227] M. P. Reymann, T. Würfl, P. Ritt, B. Stimpel, M. Cachovan, A. H. Vija, and A. Maier, U-net for SPECT image denoising, in 2019 IEEE Nuclear Science Symposium and Medical Imaging Conference (NSS/MIC), IEEE, 2019, pp. 1–2.
[228] D. Rezende and S. Mohamed, Variational inference with normalizing flows, in International Conference on Machine Learning, PMLR, 2015, pp. 1530–1538.
[229] J. Rick Chang, C.-L. Li, B. Poczos, B. Vijaya Kumar, and A. C. Sankaranarayanan, One network to solve them all: Solving linear inverse problems using deep projection models, in Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 5888–5897.
[230] G. O. Roberts, R. L. Tweedie, et al., Exponential convergence of langevin distributions and their discrete approximations, Bernoulli, 2 (1996), pp. 341–363.
[231] Y. Romano, M. Elad, and P. Milanfar, The little engine that could: Regularization by denoising (RED), SIAM Journal on Imaging Sciences, 10 (2017), pp. 1804–1844.
[232] R. Rombach, A. Blattmann, D. Lorenz, P. Esser, and B. Ommer, High-resolution image synthesis with latent diffusion models, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 10684–10695.
[233] A. Rond, R. Giryes, and M. Elad, Poisson inverse problems by the plug-and-play scheme, Journal of Visual Communication and Image Representation, 41 (2016), pp. 96–108.
[234] O. Ronneberger, P. Fischer, and T. Brox, U-net: Convolutional networks for biomedical image segmentation, in International Conference on Medical image computing and computer-assisted intervention, Springer, 2015, pp. 234–241.
[235] R. J. Rossi, Mathematical statistics: an introduction to likelihood based inference, John Wiley & Sons, 2018.
[236] S. Roth and M. J. Black, Fields of experts: a framework for learning image priors, 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), 2 (2005), pp. 860–867 vol. 2.
[237] S. Roth and M. J. Black, Fields of experts, International Journal of Computer Vision, 82 (2009), pp. 205–229.
[238] P. J. Rousseeuw, F. R. Hampel, E. M. Ronchetti, and W. A. Stahel, Robust statistics: the approach based on influence functions, John Wiley & Sons, 2011.
[239] D. L. Ruderman, The statistics of natural images, Network: Computation in Neural Systems, 5 (1994), pp. 517–548.
[240] L. I. Rudin, S. Osher, and E. Fatemi, Nonlinear total variation based noise removal algorithms, Physica D: nonlinear phenomena, 60 (1992), pp. 259–268.
[241] N. Ruiz, Y. Li, V. Jampani, Y. Pritch, M. Rubinstein, and K. Aberman, Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation, arXiv preprint arXiv:2208.12242, (2022).
[242] D. E. Rumelhart, G. E. Hinton, and R. J. Williams, Learning representations by back-propagating errors, nature, 323 (1986), pp. 533–536.
[243] C. Saharia, W. Chan, H. Chang, C. Lee, J. Ho, T. Salimans, D. Fleet, and M. Norouzi, Palette: Image-to-image diffusion models, in ACM SIGGRAPH 2022 Conference Proceedings, 2022, pp. 1–10.
[244] C. Saharia, W. Chan, S. Saxena, L. Li, J. Whang, E. Denton, S. K. S. Ghasemipour, B. K. Ayan, S. S. Mahdavi, R. G. Lopes, et al., Photorealistic text-to-image diffusion models with deep language understanding, arXiv preprint arXiv:2205.11487, (2022).
[245] C. Saharia, J. Ho, W. Chan, T. Salimans, D. J. Fleet, and M. Norouzi, Image super-resolution via iterative refinement, IEEE Transactions on Pattern Analysis and Machine Intelligence, (2022).
[246] P. Saint-Marc, J.-S. Chen, and G. Medioni, Adaptive smoothing: A general tool for early vision, IEEE Transactions on Pattern Analysis & Machine Intelligence, 13 (1991), pp. 514–529.
[247] T. Salimans and J. Ho, Progressive distillation for fast sampling of diffusion models, arXiv preprint arXiv:2202.00512, (2022).
[248] J. Salmon, Z. Harmany, C.-A. Deledalle, and R. Willett, Poisson noise reduction with non-local pca, Journal of mathematical imaging and vision, 48 (2014), pp. 279–294.
[249] A. Sauer, K. Schwarz, and A. Geiger, StyleGAN-XL: Scaling StyleGAN to large diverse datasets, arXiv preprint arXiv:2202.00273, (2022).
[250] M. Scetbon, M. Elad, and P. Milanfar, Deep K-SVD denoising, IEEE Transactions on Image Processing, 30 (2021), pp. 5944–5955.
[251] R. W. Schafer, R. M. Mersereau, and M. A. Richards, Constrained iterative restoration algorithms, Proceedings of the IEEE, 69 (1981), pp. 432–450.
[252] U. Schmidt and S. Roth, Shrinkage fields for effective image restoration, 2014 IEEE Conference on Computer Vision and Pattern Recognition, (2014), pp. 2774–2781.
[253] Y. Shi, V. De Bortoli, G. Deligiannidis, and A. Doucet, Conditional simulation using diffusion Schrödinger bridges, arXiv preprint arXiv:2202.13460, (2022).
[254] A. Sinha, J. Song, C. Meng, and S. Ermon, D2c: Diffusion-decoding models for few-shot conditional generation, Advances in Neural Information Processing Systems, 34 (2021), pp. 12533–12548.
[255] N. Sochen, R. Kimmel, and R. Malladi, A general framework for low level vision, IEEE transactions on image processing, 7 (1998), pp. 310–318.
[256] J. W. Soh and N. I. Cho, Deep universal blind image denoising, in 2020 25th International Conference on Pattern Recognition (ICPR), IEEE, 2021, pp. 747–754.
[257] J. Sohl-Dickstein, E. Weiss, N. Maheswaranathan, and S. Ganguli, Deep unsupervised learning using nonequilibrium thermodynamics, in International Conference on Machine Learning, PMLR, 2015, pp. 2256–2265.
[258] J. Song, C. Meng, and S. Ermon, Denoising diffusion implicit models, in International Conference on Learning Representations, April 2021.
[259] M. Song, Y. Zhang, and T. O. Aydin, Tempformer: Temporally consistent transformer for video denoising, in European Conference on Computer Vision, 2022.
[260] Y. Song and S. Ermon, Generative modeling by estimating gradients of the data distribution, in Advances in Neural Information Processing Systems, 2019, pp. 11918–11930.
[261] Y. Song and S. Ermon, Improved techniques for training score-based generative models, in Advances in Neural Information Processing Systems, 33, 2020.
[262] Y. Song, J. Sohl-Dickstein, D. P. Kingma, A. Kumar, S. Ermon, and B. Poole, Score-based generative modeling through stochastic differential equations, in International Conference on Learning Representations, 2021.
[263] S. Sreehari, S. V. Venkatakrishnan, B. Wohlberg, G. T. Buzzard, L. F. Drummy, J. P. Simmons, and C. A. Bouman, Plug-and-play priors for bright field electron tomography and sparse interpolation, IEEE Transactions on Computational Imaging, 2 (2016), pp. 408–423.
[264] K. Srinivasan and D. Ebenezer, A new fast and efficient decision-based algorithm for removal of high-density impulse noises, IEEE Signal Processing Letters, 14 (2007), pp. 189–192.
[265] C. M. Stein, Estimation of the mean of a multivariate normal distribution, The annals of Statistics, (1981), pp. 1135–1151.
[266] J. Sun, Y. Du, C. Li, T.-H. Wu, B. Yang, and G. S. Mok, Pix2pix generative adversarial network for low dose myocardial perfusion SPECT denoising, Quantitative Imaging in Medicine and Surgery, 12 (2022), p. 3539.
[267] Y. Sun, J. Liu, and U. Kamilov, Block coordinate regularization by denoising, in Advances in Neural Information Processing Systems, 2019, pp. 380–390.
[268] Y. Sun, B. Wohlberg, and U. S. Kamilov, An online plug-and-play algorithm for regularized image reconstruction, IEEE Transactions on Computational Imaging, 5 (2019), pp. 395–408.
[269] Y. Sun, Z. Wu, X. Xu, B. Wohlberg, and U. S. Kamilov, Scalable plug-and-play admm with convergence guarantees, IEEE Transactions on Computational Imaging, 7 (2021), pp. 849–863.
[270] Y. Tai, J. Yang, X. Liu, and C. Xu, Memnet: A persistent memory network for image restoration, in Proceedings of the IEEE international conference on computer vision, 2017, pp. 4539–4547.
[271] H. Takeda, S. Farsiu, and P. Milanfar, Kernel regression for image processing and reconstruction, IEEE Transactions on image processing, 16 (2007), pp. 349–366.
[272] H. Talbot, H. Phelippeau, M. Akil, and S. Bara, Efficient poisson denoising for photography, in 2009 16th IEEE International Conference on Image Processing (ICIP), IEEE, 2009, pp. 3881–3884.
[273] H. Talebi and P. Milanfar, Global image denoising, IEEE Transactions on Image Processing, 23 (2013), pp. 755–768.
[274] T. Tasdizen, Principal neighborhood dictionaries for nonlocal means image denoising, IEEE Transactions on Image Processing, 18 (2009), pp. 2649–2660.
[275] M. Tassano, J. Delon, and T. Veit, Dvdnet: A fast network for deep video denoising, in 2019 IEEE International Conference on Image Processing (ICIP), IEEE, 2019, pp. 1805–1809.
[276] M. Tassano, J. Delon, and T. Veit, Fastdvdnet: Towards real-time deep video denoising without flow estimation, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 1354–1363.
[277] P. Tchebycheff, Sur deux théorèmes relatifs aux probabilités, Acta Mathematica, 14 (1890), pp. 305–315.
[278] A. M. Teodoro, J. M. Bioucas-Dias, and M. A. Figueiredo, Scene-adapted plug-and-play algorithm with convergence guarantees, in IEEE 27th International Workshop on Machine Learning for Signal Processing (MLSP), 2017, pp. 1–6.
[279] A. M. Teodoro, J. M. Bioucas-Dias, and M. A. Figueiredo, A convergent image fusion algorithm using scene-adapted Gaussian-mixture-based denoising, IEEE Transactions on Image Processing, 28 (2018), pp. 451–463.
[280] A. M. Teodoro, J. M. Bioucas-Dias, and M. A. Figueiredo, Image restoration and reconstruction using targeted plug-and-play priors, IEEE Transactions on Computational Imaging, 5 (2019), pp. 675–686.
[281] C. Tian, L. Fei, W. Zheng, Y. Xu, W. Zuo, and C.-W. Lin, Deep learning on image denoising: An overview, Neural Networks, 131 (2020), pp. 251–275.
[282] A. Tikhonov and V. Arsenin, Solution of Ill-posed Problems, Washington: Winston & Sons, 1977.
[283] T. Tirer and R. Giryes, Image restoration by iterative denoising and backward projections, IEEE Transactions on Image Processing, 28 (2018), pp. 1220–1234.
[284] C. Tomasi and R. Manduchi, Bilateral filtering for gray and color images, in Sixth international conference on computer vision (IEEE Cat. No. 98CH36271), IEEE, 1998, pp. 839–846.
[285] L. D. Tran, S. M. Nguyen, and M. Arai, Gan-based noise model for denoising real images, in Proceedings of the Asian Conference on Computer Vision, 2020.
[286] D. Ulyanov, A. Vedaldi, and V. Lempitsky, Deep image prior, in Proceedings of the IEEE CVPR, 2018, pp. 9446–9454.
[287] A. Vahdat, K. Kreis, and J. Kautz, Score-based generative modeling in latent space, in Neural Information Processing Systems (NeurIPS), 2021.
[288] G. Vaksman and M. Elad, Patch-craft self-supervised training for correlated image denoising, arXiv preprint arXiv:2211.09919, (2022).
[289] G. Vaksman, M. Elad, and P. Milanfar, LIDIA: Lightweight learned image denoising with instance adaptation, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2020, pp. 524–525.
[290] G. Vaksman, M. Elad, and P. Milanfar, Patch craft: Video denoising by deep modeling and patch matching, 2021 IEEE/CVF International Conference on Computer Vision (ICCV), (2021), pp. 2137–2146.
[291] G. Vaksman, M. Zibulevsky, and M. Elad, Patch ordering as a regularization for inverse problems in image processing, SIAM Journal on Imaging Sciences, 9 (2016), pp. 287–319.
[292] D. Valsesia, G. Fracastoro, and E. Magli, Deep graph-convolutional image denoising, IEEE Transactions on Image Processing, 29 (2019), pp. 8226–8237.
[293] A. Van Den Oord, N. Kalchbrenner, and K. Kavukcuoglu, Pixel recurrent neural networks, in International Conference on Machine Learning, PMLR, 2016, pp. 1747–1756.
[294] M. Vatsa, R. Singh, and A. Noore, Denoising and segmentation of 3d brain images, IPCV, 9 (2009), pp. 561–567.
[295] S. V. Venkatakrishnan, C. A. Bouman, and B. Wohlberg, Plug-and-play priors for model based reconstruction, in 2013 IEEE Global Conference on Signal and Information Processing, IEEE, 2013, pp. 945–948.
[296] P. Vincent, A connection between score matching and denoising autoencoders, Neural computation, 23 (2011), pp. 1661–1674.
[297] C. Wang, J. Zhou, and S. Liu, Adaptive non-local means filter for image deblocking, Signal Processing: Image Communication, 28 (2013), pp. 522–530.
[298] Y. Wang, H. Huang, Q. Xu, J. Liu, Y. Liu, and J. Wang, Practical deep raw image denoising on mobile devices, in European Conference on Computer Vision, Springer, 2020, pp. 1–16.
[299] Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, Image quality assessment: from error visibility to structural similarity, IEEE transactions on image processing, 13 (2004), pp. 600–612.
[300] Z. Wang and D. Zhang, Progressive switching median filter for the removal of impulse noise from highly corrupted images, IEEE Transactions on Circuits and Systems II: Analog and Digital Signal Processing, 46 (1999), pp. 78–80.
[301] K. Wei, Y. Fu, J. Yang, and H. Huang, A physics-based noise formation model for extreme low-light raw denoising, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 2758–2767.
[302] J. Weickert, Anisotropic diffusion in image processing, Stuttgart: Teubner, 1998.
[303] J. Whang, M. Delbracio, H. Talebi, C. Saharia, A. G. Dimakis, and P. Milanfar, Deblurring via stochastic refinement, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 16293–16303.
[304] N. Wiener, Extrapolation, interpolation, and smoothing of stationary time series: with engineering applications, vol. 113, MIT press Cambridge, MA, 1949.
[305] J. Wright, A. Ganesh, S. Rao, Y. Peng, and Y. Ma, Robust principal component analysis: Exact recovery of corrupted low-rank matrices via convex optimization, Advances in neural information processing systems, 22 (2009).
[306] J. Xie, L. Xu, and E. Chen, Image denoising and inpainting with deep neural networks, in NIPS, 2012.
[307] J. Xu, L. Zhang, and D. Zhang, A trilateral weighted sparse coding scheme for real-world image denoising, in Proceedings of the European conference on computer vision (ECCV), 2018, pp. 20–36.
[308] Q. Xu, C. Zhang, and L. Zhang, Denoising convolutional neural network, in 2015 IEEE International Conference on Information and Automation, IEEE, 2015, pp. 1184–1187.
[309] X. Xu, Y. Sun, J. Liu, B. Wohlberg, and U. S. Kamilov, Provable convergence of plug-and-play priors with MMSE denoisers, arXiv preprint arXiv:2005.07685, (2020).
[310] N. Yair and T. Michaeli, Multi-scale weighted nuclear norm image restoration, in Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 3165–3174.
[311] J. C. K. Yan, P. Campisi, and D. Hatzinakos, Film grain noise removal and generation for color images, in Proceedings of the 1998 IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP'98 (Cat. No. 98CH36181), vol. 5, IEEE, 1998, pp. 2957–2960.
[312] J. C. K. Yan and D. Hatzinakos, Signal-dependent film grain noise removal and generation based on higher-order statistics, Proceedings of the IEEE Signal Processing Workshop on Higher-Order Statistics, (1997), pp. 77–81.
[313] D. Yang and J. Sun, Proximal dehaze-net: A prior learning-based deep network for single image dehazing, in Proceedings of the european conference on computer vision (ECCV), 2018, pp. 702–717.
[314] L. Yang, Z. Li, R. Ge, J. Zhao, H. Si, and D. Zhang, Low-dose ct denoising via sinogram inner-structure transformer, IEEE Transactions on Medical Imaging, (2022).
[315] L. Yang, Z. Zhang, Y. Song, S. Hong, R. Xu, Y. Zhao, Y. Shao, W. Zhang, B. Cui, and M.-H. Yang, Diffusion models: A comprehensive survey of methods and applications, arXiv preprint arXiv:2209.00796, (2022).
[316] Q. Yang, P. Yan, Y. Zhang, H. Yu, Y. Shi, X. Mou, M. K. Kalra, Y. Zhang, L. Sun, and G. Wang, Low-dose ct image denoising using a generative adversarial network with wasserstein distance and perceptual loss, IEEE transactions on medical imaging, 37 (2018), pp. 1348–1357.
[317] C. Yao, S. Jin, M. Liu, and X. Ban, Dense residual transformer for image denoising, Electronics, 11 (2022), p. 418.
[318] X. Yi and P. Babyn, Sharpness-aware low-dose ct denoising using conditional generative adversarial network, Journal of digital imaging, 31 (2018), pp. 655–669.
[319] F. Yu, Y. Zhang, S. Song, A. Seff, and J. Xiao, Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop, arXiv preprint arXiv:1506.03365, (2015).
[320] G. Yu, G. Sapiro, and S. Mallat, Solving inverse problems with piecewise linear estimators: From Gaussian mixture models to structured sparsity, IEEE Transactions on Image Processing, 21 (2011), pp. 2481–2499.
[321] Z. Yue, H. Yong, Q. Zhao, D. Meng, and L. Zhang, Variational denoising network: Toward blind noise modeling and removal, Advances in neural information processing systems, 32 (2019).
[322] S. W. Zamir, A. Arora, S. Khan, M. Hayat, F. S. Khan, and M.-H. Yang, Restormer: Efficient transformer for high-resolution image restoration, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 5728–5739.
[323] S. W. Zamir, A. Arora, S. Khan, M. Hayat, F. S. Khan, M.-H. Yang, and L. Shao, Multi-stage progressive image restoration, in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2021, pp. 14821–14831.
[324] J. Zeng, G. Cheung, M. Ng, J. Pang, and C. Yang, 3D point cloud denoising using graph laplacian regularization of a low dimensional manifold model, IEEE Transactions on Image Processing, PP (2018).
[325] B. Zhang, J. M. Fadili, and J.-L. Starck, Wavelets, ridgelets, and curvelets for poisson noise removal, IEEE Transactions on image processing, 17 (2008), pp. 1093–1108.
[326] B. Zhang, M. Fadili, J.-L. Starck, and J.-C. Olivo-Marin, Multiscale variance-stabilizing transform for mixed-poisson-gaussian processes and its applications in bioimaging, in 2007 IEEE International Conference on Image Processing, vol. 6, IEEE, 2007, pp. VI–233.
[327] H. Zhang, I. Goodfellow, D. Metaxas, and A. Odena, Self-attention generative adversarial networks, in International Conference on Machine Learning, PMLR, 2019, pp. 7354–7363.
[328] K. Zhang, L. V. Gool, and R. Timofte, Deep unfolding network for image super-resolution, in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020, pp. 3217–3226.
[329] K. Zhang, Y. Li, W. Zuo, L. Zhang, L. V. Gool, and R. Timofte, Plug-and-play image restoration with deep denoiser prior, IEEE Transactions on Pattern Analysis and Machine Intelligence, 44 (2020), pp. 6360–6376.
[330] K. Zhang, W. Zuo, Y. Chen, D. Meng, and L. Zhang, Beyond a Gaussian denoiser: Residual learning of deep CNN for image denoising, IEEE Transactions on Image Processing, 26 (2017), pp. 3142–3155.
[331] K. Zhang, W. Zuo, S. Gu, and L. Zhang, Learning deep CNN denoiser prior for image restoration, in Proceedings of the IEEE CVPR, 2017, pp. 3929–3938.
[332] K. Zhang, W. Zuo, and L. Zhang, FFDNet: Toward a fast and flexible solution for CNN-based image denoising, IEEE Transactions on Image Processing, 27 (2018), pp. 4608–4622.
[333] K. Zhang, W. Zuo, and L. Zhang, Deep plug-and-play super-resolution for arbitrary blur kernels, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019, pp. 1671–1681.
[334] M. Zhang, F. Zhang, Q. Liu, and S. Wang, Vst-net: Variance-stabilizing transformation inspired network for poisson denoising, Journal of Visual Communication and Image Representation, 62 (2019), pp. 12–22.
[335] R. Zhang, P. Isola, A. A. Efros, E. Shechtman, and O. Wang, The unreasonable effectiveness of deep features as a perceptual metric, in Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 586–595.
[336] X. Zhang, Z. Xu, N. Jia, W. Yang, Q. Feng, W. Chen, and Y. Feng, Denoising of 3d magnetic resonance images by using higher-order singular value decomposition, Medical Image Analysis, In press (2014).
[337] X.-P. Zhang and M. D. Desai, Adaptive denoising based on sure risk, IEEE signal processing letters, 5 (1998), pp. 265–267.
[338] Y. Zhang, H. Qin, X. Wang, and H. Li, Rethinking noise synthesis and modeling in raw denoising, in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 4593–4601.
[339] Y. Zhang, Y. Tian, Y. Kong, B. Zhong, and Y. Fu, Residual dense network for image restoration, IEEE Transactions on Pattern Analysis and Machine Intelligence, (2020).
[340] Y. Zhang, Y. Zhu, E. Nichols, Q. Wang, S. Zhang, C. Smith, and S. Howard, A poisson-gaussian denoising dataset with real fluorescence microscopy images, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 11710–11718.
[341] Z. Zhang, Y. Liu, J. Liu, F. Wen, and C. Zhu, Amp-net: Denoising-based deep unfolding for compressive image sensing, IEEE Transactions on Image Processing, 30 (2020), pp. 1487–1500.
[342] H. Zhao, W. Shao, B. Bao, and H. Li, A simple and robust deep convolutional approach to blind image denoising, in Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, 2019.
[343] L. Zhou, J. D. Schaefferkoetter, I. W. Tham, G. Huang, and J. Yan, Supervised learning with cyclegan for low-dose fdg pet image denoising, Medical image analysis, 65 (2020), p. 101770.
[344] Y. Zhou, J. Jiao, H. Huang, Y. Wang, J. Wang, H. Shi, and T. Huang, When AWGN-based denoiser meets real noises, in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, 2020, pp. 13074–13081.
[345] F. Zhu, G. Chen, and P.-A. Heng, From noise modeling to blind image denoising, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 420–429.
[346] Z. Zhu, Y. Wei, J. Wang, Z. Gan, Z. Zhang, L. Wang, G. Hua, L. Wang, Z. Liu, and H. Hu, Exploring discrete diffusion models for image captioning, arXiv preprint arXiv:2211.11694, (2022).
[347] D. Zoran and Y. Weiss, From learning models of natural image patches to whole image restoration, in IEEE International Conference on Computer Vision, 2011, pp. 479–486.

diff --git a/JdE1T4oBgHgl3EQfsAXy/content/tmp_files/load_file.txt b/JdE1T4oBgHgl3EQfsAXy/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8d6179656284ccc31c65b5ad947f24716bef9912
--- /dev/null
+++ b/JdE1T4oBgHgl3EQfsAXy/content/tmp_files/load_file.txt
@@ -0,0 +1,3508 @@
filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf,len=3507

Image Denoising: The Deep Learning Revolution and Beyond – A Survey Paper –
Michael Elad, Bahjat Kawar and Gregory Vaksman
The Computer Science Department, Technion – Israel Institute of Technology
email: {elad,bahjat.kawar,grishavak}@cs.technion.ac.il
Abstract. Image denoising – removal of additive white Gaussian noise from an image – is one of the oldest and most studied problems in image processing. An extensive work over several decades has led to thousands of papers on this subject, and to many well-performing algorithms for this task. Indeed, ten years ago, these achievements have led some researchers to suspect that "Denoising is Dead", in the sense that all that can be achieved in this domain has already been obtained. However, this turned out to be far from the truth, with the penetration of deep learning (DL) into the realm of image processing. The era of DL brought a revolution to image denoising, both by taking the lead in today's ability for noise suppression in images, and by broadening the scope of denoising problems being treated. Our paper starts by describing this evolution, highlighting in particular the tension and synergy that exist between classical approaches and modern Artificial Intelligence (AI) alternatives in design of image denoisers. The recent transitions in the field of image denoising go far beyond the ability to design better denoisers. In the second part of this paper we focus on recently discovered abilities and prospects of image denoisers. We expose the possibility of using image denoisers for service of other problems, such as regularizing general inverse problems and serving as the prime engine in diffusion-based image synthesis.
We also unveil the (strange?) idea that denoising and other inverse problems might not have a unique solution, as common algorithms would have us believe. Instead, we describe constructive ways to produce randomized and diverse high perceptual quality results for inverse problems, all fueled by the progress that DL brought to image denoising. This is a survey paper, and its prime goal is to provide a broad view of the history of the field of image denoising and closely related topics in image processing. Our aim is to give a better context to recent discoveries, and to the influence of the AI revolution in our domain.

Key words. Image denoising, Inverse problems, MMSE Estimation, Plug and Play Prior (PnP), Regularization by Denoising (RED), Langevin Dynamics, Diffusion Models, Image Synthesis, Perceptual Quality, Perception-Distortion Trade-off.

1. Introduction. Within the wide fields of image processing and computational imaging, the task of image denoising has been given an exceptionally large attention over the past several decades. Indeed, noise suppression in images is one of the oldest and most studied problems in these fields, with numerous papers offering diverse algorithms, analysis of this task in various forms, or extensions of it.¹ A substantial portion of the proposed denoising techniques has been dedicated to the removal of Additive White Gaussian Noise (AWGN) from images, while there are other contributions that target different noise distributions, e.g. Poisson, salt-and-pepper, and more.

¹See Figure 2.1 for the quantities of denoising related papers over the years.
Removal of noise from an image is an actual necessity that comes up with practically every imaging sensor [191]. However, the interest in this problem goes far beyond this practical need – image denoising is the simplest inverse problem, and as such, it has been recognized over the years as the perfect test-bed for assessing new ideas that are often brought to image processing. In recent years this appeal has further widened with the realization that denoisers can serve other imaging needs [295, 231, 260]. The years 1980 – 2010 have seen consistently improving denoising algorithms, many of which rely on the Bayesian point of view. This progress has been driven by an evolution of image priors that form the backbone of the overall progress in image processing. This path, which we will refer to as the classical era, started with the early L2-based regularization, proceeding to robust statistics, moving to the introduction of wavelets, and the later deployment of partial differential equations to imaging tasks, and this continued all the way to sparse modeling, patch-based methods, and low-rank structure assumptions². This extensive work over several decades has led to many well-performing denoising algorithms, and to a compelling and rich scientific field. In fact, ten years ago, these glorious achievements have led some researchers to consider the possibility that "Denoising is Dead", believing that the existing solutions are already touching the achievable performance ceiling [45, 162, 163].

²As referencing this is too long, we provide specific citations to each of these in later sections.
The past decade has brought a paradigm shift to the general topic of data processing due to the emergence of the Artificial Intelligence (AI) revolution. The great success of this deep learning (DL) trend has also introduced a reformation to the broad field of image processing, and to image denoising in particular. These new winds led to novel techniques for designing better performing denoisers [50, 330, 167, 332, 270, 159, 8, 339, 165], and discovering new and more daring ways for deploying them and broadening their scope [1, 170, 323, 288, 102, 166, 146, 212, 113]. These days, deep-learning based denoisers are at the very top in their ability for noise suppression in images (see e.g. [330, 165, 322], leaving no competitive room for the classical alternatives).

In parallel to the above and seemingly detached from the deep learning activity, image denoising has been also a topic of investigation and discoveries of a different flavor: Harnessing denoiser engines for other imaging tasks. This started with the surprising idea that a good performing denoiser can serve as a prior, offering a highly effective regularization to inverse problems [295, 231, 28, 139, 283, 268, 192, 280, 49, 55]. This continued with the discovery that such denoisers can also be used for randomly synthesizing images by offering a practical sampling from the prior distribution of images, this way posing a potent competition to Generative Adversarial Networks (GANs) and other image generation methods [260, 261, 262, 120, 287, 68, 122, 143, 121]. An intriguing sequel to the above synthesis revelation is the idea that solution of inverse problems could be revisited and posed as a sampling task from the posterior distribution of the image given the measurements, thus resorting again to image denoisers as the means for obtaining these solutions.
This very recent line of work unveiled the daring idea that denoising and other inverse problems might not have a unique solution, as common algorithms would have us believe [212, 146, 138, 145, 211]. Instead, this sampling approach has been shown to lead to constructive ways for producing randomized and diverse high perceptual quality results for inverse problems, exposing as a byproduct the inner uncertainty in such tasks. All the above achievements have been strongly influenced and fueled by the progress that DL brought to image denoising. Adopting a wider perspective, image denoising these days has new horizons, and if any conclusion can be drawn from these recent accomplishments, it would be that this field is a very much alive playground with great challenges and prospects.

This paper aims to disclose and detail the compelling story drawn above. Our prime goal is to provide a broad view of the history of the field of image denoising and closely related topics in image processing, give a better context to recent discoveries, and highlight the influence of the AI revolution in our domain. We start our journey in Section 2 by clearly defining the image denoising task, discussing its ill-posed nature, and demonstrating its appeal over the years. We proceed in Sections 3 and 4 by describing the evolution of image denoisers, from the classical era to the deep-learning-based alternatives. Section 5 highlights the tension and the possible synergy that exists between classical approaches and modern Artificial Intelligence (AI) alternatives in design of image denoisers. In the second part of the paper we change gears and move to discuss three recent discoveries that consider image denoisers as building blocks for other needs.
We start broadly in Section 6 by defining the denoiser engine and its properties, and set the stage for the presentation of these three discoveries. We proceed in Section 7 by discussing the ability to deploy these engines for regularizing inverse problems. Section 8 exposes the possibility of synthesizing images using such denoisers, and Section 9 presents the notion of targeting perfect perceptual quality outcomes in image denoising and inverse problems by sampling from the posterior distribution. We conclude this paper in Section 10 with an attempt to point to open questions and potential research directions.

Disclaimer: While this paper aims to present a survey on the various ups and downs that the field of image denoising has gone through over the years, it would be simply impossible to do justice to all the published literature in this domain. We apologize if some papers are omitted from our references, as we attempt to mark the critical milestones in the history of this field. The interested reader is referred to [156, 197, 130, 18, 92, 281] for other surveys with different orientations.

2. Image Denoising – Background.

2.1. Problem Definition. Our story must start with a proper definition of the denoising problem, and this will also serve the need for defining our notations hereafter. An ideal image³ x ∈ R^N is assumed to be drawn from the image manifold, represented by the probability density function p(x). Our measurement is the vector y ∈ R^N, given by

    y = x + v,    (2.1)
where v ∈ R^N is a zero-mean independent and identically distributed (i.i.d.) Gaussian noise, i.e. v ∼ N(0, σ²I). The denoising task is the recovery of x from y with the knowledge of σ, and a denoiser is thus a function of the form x̂ = D(y, σ). While there are many ways for assessing the performance of such denoisers, the most common one is the Mean-Squared-Error (MSE) measure,

    MSE = E[ ∥x − x̂∥₂² ] = E[ ∥x − D(y, σ)∥₂² ],    (2.2)

where the expectation is taken over the image distribution. A well-known result in estimation theory states that the best denoising with respect to this measure (i.e., achieving the Minimum MSE, thus referred to as MMSE) is given by [217],

    x̂_MMSE = E(x|y).    (2.3)
This formula is misleadingly simple in its concise form, as designing a denoiser that achieves MMSE is quite challenging and oftentimes simply impossible. By the way, the curious reader may wonder why we are emphasizing the MSE measure and the MMSE denoiser. The answer will be carefully unfolded in the later parts of the paper, where these choices play a critical role. A brief note about this appears later in this section.

How hard is it to denoise an image? How complicated could it be? Again, the simplicity of the problem definition is illusive, as this task is highly challenging and in fact ill-posed. One could easily design a filtering method for attenuating and suppressing the noise in y, but such a process is very likely to ruin the image content as well, losing small details, sacrificing edges, damaging fine textures, and more.

³For simplicity of the discussion, assume that we refer to grayscale images. Addressing color is discussed shortly in Section 4.
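To make the notation of Eqs. (2.1)-(2.3) concrete, here is a minimal illustrative sketch in Python/NumPy (not taken from the paper); the image, the noise level, and the naive 3x3 box filter standing in for D(y, σ) are arbitrary choices made only for demonstration.

import numpy as np

rng = np.random.default_rng(0)

# A stand-in "clean" image x: any grayscale array would do for this illustration.
x = np.zeros((64, 64))
x[16:48, 16:48] = 1.0                        # a bright square on a dark background

# The AWGN model of Eq. (2.1): y = x + v, with v ~ N(0, sigma^2 I).
sigma = 0.1
y = x + sigma * rng.standard_normal(x.shape)

def box_denoiser(y_noisy, _sigma):
    """A deliberately naive stand-in for D(y, sigma): 3x3 box filtering."""
    padded = np.pad(y_noisy, 1, mode="edge")
    out = np.zeros_like(y_noisy)
    for r in range(3):
        for c in range(3):
            out += padded[r:r + y_noisy.shape[0], c:c + y_noisy.shape[1]]
    return out / 9.0

x_hat = box_denoiser(y, sigma)

# Per-image empirical counterparts of the MSE in Eq. (2.2).
mse_noisy    = np.mean((x - y) ** 2)         # close to sigma^2 by construction
mse_denoised = np.mean((x - x_hat) ** 2)     # lower in flat regions, higher near edges
print(f"MSE(noisy)    = {mse_noisy:.5f}")
print(f"MSE(denoised) = {mse_denoised:.5f}")

The naive filter illustrates the ill-posedness remark above: it attenuates noise in flat regions while blurring the edges of the square, so its overall error mixes both effects. The MMSE denoiser of Eq. (2.3) is the (generally intractable) estimator that balances this trade-off optimally on average over p(x).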
2.2. The Gaussianity Assumption. In the problem definition above we focused on a very specific case of a zero-mean i.i.d. Gaussian noise contamination. The natural question arising is why we are restricting the discussion to this case. A brief inspection of the literature on image denoising reveals that this noise model is very popular, covered by most of the developed algorithms. Where does this popularity come from? Several answers come to mind:

Central Limit Theorem: Noise in imaging may arise due to many physical reasons, and their accumulation often leads to a Gaussian distribution of the form discussed above, as an empirical manifestation of the Central Limit Theorem [277, 127]. As such, rather than modelling the intricate noise origins, a Gaussian assumption offers a blessed simplification for the later analysis and algorithm development in this field.

The Poisson Alternative: One might rightfully argue that the proper distribution to address for imaging noise would be the Poisson one, as imaging sensors essentially count photons, and their arrival is of Poissonian nature [59]. While this argument is indeed correct, when photon counts are high, the Poisson distribution becomes a Gaussian one [26]. If the counts are low, a variance stabilizing transform, such as Anscombe [7], can turn these measurements into additive-Gaussian-contaminated ones, again resorting to the Gaussianity regime [81, 325, 233, 184, 12, 272, 334] (a brief illustrative sketch of this transform appears right after this list).
Mathematical Elegance: The Gaussian case is easily modeled, and consequent formulations become simple and elegant. Such is the case with the log-likelihood function p(y|x) and other related derivations that will be shown in subsequent sections.

MMSE Denoiser Engines: Our last argument for the Gaussianity assumption is quite surprising and unfamiliar to many in our field. As it turns out, an MMSE denoiser for the removal of zero-mean i.i.d. Gaussian noise is of great theoretical importance. Such an engine has critical properties that enable its deployment as a prior (see Section 7) for inverse problems. In addition, and perhaps more importantly, such denoisers have strong theoretical ties to the score function [260], a fact that will be highlighted and exploited in Sections 8-9.2.
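As a small illustrative aside to the list above (not taken from the paper), the variance-stabilizing route mentioned in the Poisson item can be sketched with the classical Anscombe transform: mapping Poisson counts y to 2*sqrt(y + 3/8) yields values whose fluctuations are approximately Gaussian with unit variance, provided the counts are not extremely low. After this mapping, any AWGN denoiser D(·, σ = 1) can be applied and the result mapped back; the simple algebraic inverse below ignores the bias correction that a more careful (exact unbiased) inverse would apply.

import numpy as np

rng = np.random.default_rng(0)

# Poisson ("shot noise") measurements of a non-negative intensity image.
intensity = np.full((64, 64), 20.0)          # arbitrary flat photon-count level
counts = rng.poisson(intensity).astype(float)

def anscombe(y):
    """Forward Anscombe transform: approximately Gaussianizes Poisson noise."""
    return 2.0 * np.sqrt(y + 3.0 / 8.0)

def inverse_anscombe(z):
    """Simple algebraic inverse (biased; used here only for illustration)."""
    return (z / 2.0) ** 2 - 3.0 / 8.0

z = anscombe(counts)
print("std of the transformed data:", z.std())   # roughly 1.0 at this count level

# z can now be handed to any Gaussian denoiser with sigma = 1 (for instance, the
# sketch shown after Section 2.1), and the estimate mapped back to intensities:
restored = inverse_anscombe(z)                   # identity round-trip in this sketch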
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' The inpainting problem [90, 182, 306, 134] can be regarded as a special such case, where portions of the image are simply missing and need to be revived.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' These missing pixels can be regarded as contaminated by a very strong noise, while other regions of the image are reliably measured.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' The denoising task may assume a different setting altogether if the visual content is of different form.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Such an example is noise reduction in bursts of snapshots [173, 102, 198, 189, 86, 166, 80], where several images are treated jointly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Somewhat similar yet different is the task of video denoising [179, 9, 10, 275, 276, 290, 259, 180, 322, 161], in which we may seek online filtering of the incoming frames.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' When handling specific imaging types (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=', microscopy [223, 25, 13, 103, 340, 186, 158, 188], CT [164, 48, 318, 316, 70, 314] and PET/SPECT imaging [51, 82, 105, 227, 343, 266], and more), the algorithm design may require adequate adaptations to the data format (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' treating 3D volumes [294, 336, 324, 64, 178]) or to the way it is captured.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' The last category of extensions has to do with our prior knowledge when addressing denoising tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Blind denoising [131, 169, 157, 47, 201, 256, 342, 321, 114] refers to the case in which the noise is known to be i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Gaussian, but σ is unknown, and may be even spatially changing.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A more complex situation is when the noise statistics are totally unknown [345, 8, 2, 307].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' In this context, a special case of great interest in recent years is removal of true noise from given images captured by digital cameras (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=', cellphones) [298, 149, 170, 285, 301, 128].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 6 M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' ELAD, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' KAWAR AND G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' VAKSMAN 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' The Interest in Image Denoising.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='1 presents a graph showing the number of papers that have been published each year on the topic of image denoising.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Overall, nearly 30, 000 such papers are identified by Clarivate Web-Of-Science (WoS), published mostly in the past 25 years.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' As such, this is clearly one of the most heavily studied topics in image processing, and perhaps in exact sciences in general.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Also evident from this graph is the consistent growth over the years in this topic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Where does this popularity come from?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='1: The number of papers on the image denoising topic over the years.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' This graph corresponds to the search topic=((image or video ) and (denoising or (noise and remov) or clean)) performed on December 1st 2022 in Clarivate Web-of-Science (WoS).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Note that the lower count in 2022 does not stand for a new trend, but rather caused by a delayed reporting of new papers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A prime reason to study image denoising is its practical relevance to imaging systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Removal of noise from acquired visual information is an actual necessity that comes up with practically every imaging sensor [191].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Thus, various algorithms have been developed for implementation in image processing software packages and within the ISP (Image Signal Processor) – the path that starts with the raw acquired data and ends with a high quality image – of every digital camera [23, 30, 298, 338, 124].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Beyond the obvious practical motivation described above, the interest in image denoising has other, more profound, roots.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Image denoising is the simplest inverse problem, and as such, it has been recognized over the years as the perfect platform for assessing new ideas that are often brought to image processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Indeed, all the major milestone advancements in image processing started with denoising experiments, so as to explore their validity to visual data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' This was the case with Tikhonov-Arsenin’s regularization theory [282, 104], Wavelets [185], non–linear filtering based on partial differential equations [302, 111], sparse 2800 - 2600 - 2400 - 2200 - 2000 - 1800 - 1600 - 1400 - 1200 - 1000 - 800 - 600 - 400 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 200 IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND 7 modeling of data [31, 88], and more.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' All these and many other sub-fields in imaging sciences saw image denoising as a critical first step in their diffusion into broad image processing tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' We discuss these in greater details in the next section.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' The above two reasons for the popularity of image denoising may account for many of the published papers in this domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' However, the reason we have chosen to write this paper has to do with a third, and very different, origin of popularity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Image denoising has gained much interest and appeal in recent years due to the surprising realization that denoisers can serve other imaging needs, thus widening their scope and influence [295, 231, 260, 120, 145].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' This discovery relies on a fundamental theoretical connection between denoisers and the prior distribution of images [200, 265, 84].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' This bridge provides a solid and well-motivated approach to old and new tasks in image processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' In fact, this is the topic we shall be highlighting in the latter sections of our paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' We thus defer a more detailed explanation of these ideas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Image Denoising – The Classic Era.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' So far we have discussed image denoisers without concretely diving into the actual quest of their construction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' So, how can we design an image denoiser?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Not so surprisingly, the answer to this question has evolved and changed over the years, with the accumulated knowledge and the progress in signal and image processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' And still, we may broadly separate this progress in design of image denoisers into two eras - the classical one that started in the 70’s and ended in the past decade, and the AI revolution era that started around 2012 and is very much vivid till this day.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' In this section we shall focus on the classical algorithms, and more specifically on the Bayesian point of view that played a key role in their creation.' 
3.1. The Bayesian Point of View for the Design of Denoisers. Starting with Equation (2.1), given the noisy image y and knowing that v ∼ N(0, σ²I), our goal is to estimate x. A simple approach towards this task would be Maximum-Likelihood Estimation (MLE) [205, 235], seeking ˆx that maximizes the conditional probability p(y|x), essentially maximizing the likelihood of the given measurements y. Due to the Gaussianity of the noise, this probability is given easily by

    p(y|x) = const · exp( −∥x − y∥₂² / (2σ²) ),    (3.1)

and maximizing it amounts to the trivial and fruitless solution ˆx_MLE = y. This outcome is a direct manifestation of the ill-posedness of the denoising problem, exposing the need for more information for its solution. The Bayesian remedy is to bring in the prior p(x) and work with the posterior probability, obtained via Bayes' rule:

    p(x|y) = p(y|x) · p(x) / p(y) = const · exp( −∥x − y∥₂² / (2σ²) ) · p(x).    (3.2)

In the last equality we have absorbed the denominator p(y) into the constant, as it is independent of x. While this expression is a simple modification of the MLE (multiplying the likelihood by the prior p(x)), it is in fact a significant change, as it regularizes the inversion process from y to x. Two commonly used estimators that exploit p(x|y) are the MAP and the MMSE.

The first is obtained by maximizing this posterior, leading to the Maximum A-Posteriori Probability (MAP) estimation [205, 235], given by⁴

    ˆx_MAP = arg min_x { ∥x − y∥₂² / (2σ²) − log(p(x)) }.    (3.3)

As opposed to the MLE, ˆx_MAP is dictated by two forces, the first pulling it towards y, while the other seeks a "well-behaved" result that leads to a low value of −log(p(x)) – this is exactly the regularization mentioned above. Similarly, the MMSE estimation [132] also relies on the posterior probability obtained, as shown in Equation (2.3), via⁵

    ˆx_MMSE = E(x|y) = ∫ x · p(x|y) dx.    (3.4)

While this expression is very concise and clear, operating with it has proven to be quite challenging due to the need for the partition function – the normalizing factor of this distribution. This explains the vast popularity of the MAP-based approach among the classical methods. Be it the MAP or the MMSE, the Bayesian point of view requires access to p(x) or proxies of it. This brings us to the next discussion on the evolution of priors in image processing and their impact on the design of denoisers.

⁴ This minimization is obtained by taking the −log of the above expression.
⁵ See Appendix A for a derivation of this statement.
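As a toy illustration of the MAP formulation in Equation (3.3) (our own sketch, not an algorithm from the paper), the following Python snippet minimizes a quadratic MAP objective by gradient descent, with a simple smoothness energy standing in for −log p(x); the 1/(2σ²) data weight is folded into the regularization parameter lam, and all numerical choices are illustrative.

    import numpy as np

    def laplacian(x):
        # 5-point graph Laplacian of the pixel grid, with replicated borders
        p = np.pad(x, 1, mode='edge')
        return 4.0 * x - (p[:-2, 1:-1] + p[2:, 1:-1] + p[1:-1, :-2] + p[1:-1, 2:])

    def map_denoise(y, lam=0.5, steps=300, lr=0.1):
        # Gradient descent on  0.5*||x - y||^2 + lam * (sum of squared local differences)
        x = y.copy()
        for _ in range(steps):
            x = x - lr * ((x - y) + 2.0 * lam * laplacian(x))
        return x

    rng = np.random.default_rng(0)
    clean = np.tile(np.linspace(0.0, 1.0, 64), (64, 1))       # a smooth synthetic image
    noisy = clean + 0.1 * rng.standard_normal(clean.shape)
    # the MAP estimate should show a clearly lower error than the noisy input
    print(np.mean((noisy - clean) ** 2), np.mean((map_denoise(noisy) - clean) ** 2))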
3.2. Evolution of Priors. A key player in image processing is the prior, p(x), the probability density function of the image distribution. Modeling p(x) and using it for problems in visual data processing have served as the skeleton of our field, and defined its trajectory over the years. Below we outline the central milestones in the evolution of modeling p(x). One critical theme to remember is the fact that the expression −log(p(x)), which appears in the popular MAP estimation (see Equation (3.3)), should assume a closed-form expression so as to lend itself to a manageable numerical optimization. For this reason, most attempts to characterize p(x) have chosen to use the Gibbs distribution form [132], p(x) = c · exp{−ρ(x)}, shifting our focus from p(x) to the energy function ρ(x).

So, what should ρ(x) be in order to properly describe the image distribution? To keep this discussion concise, we present in Table 3.1 a brief list of possible analytical expressions for this function, without diving into their meaning, inter-relations, and effect. A more detailed explanation of these expressions is provided in Appendix B. Please bear in mind that this naïve approach of choosing an expression for ρ(x) is nothing short of a fantastic feat – can we really expect a simple formula to grasp the richness of the image content distribution?

Table 3.1: Evolution of priors for images.

Years     | Core concept                 | Formulae for ρ(·)
∼1970     | Energy regularization        | ∥x∥₂²
1975-1985 | Spatial smoothness           | ∥Lx∥₂² or ∥D_v x∥₂² + ∥D_h x∥₂²
1980-1985 | Optimally learned transform  | ∥Tx∥₂² = xᵀR⁻¹x (via PCA)
1980-1990 | Weighted smoothness          | ∥Lx∥²_W
1990-2000 | Robust statistics            | 1ᵀμ{Lx}, e.g., Huber-Markov
1992-2005 | Total-Variation              | ∫_{v∈Ω} |∇x(v)| dv = 1ᵀ√(|D_v x|² + |D_h x|²)
1987-2005 | Other PDE-based options      | ∫_{v∈Ω} g(∇x(v), ∇²x(v)) dv
2005-2009 | Field-of-Experts             | Σ_k λ_k 1ᵀ μ_k{L_k x}
1993-2005 | Wavelet sparsity             | ∥Wx∥₁
2000-2010 | Self-similarity              | Σ_k Σ_{j∈Ω(k)} d{R_k x, R_j x}
2002-2012 | Sparsity methods             | ∥α∥₀ s.t. x = Dα
2010-2017 | Low-rank assumption          | Σ_k ∥X_{Ω(k)}∥_*

The evolution of the ideas in Table 3.1 is characterized by several major and interconnected trends – the migration from the familiar Gaussian distribution to the less intuitive heavy-tailed ones, the departure from L2 to sparsity-promoting measures such as the L1, the drift from linear approximation techniques (e.g., PCA) to non-linear ones (e.g., wavelets and sparse modeling), and above all, the replacement of axiomatic expressions with learned priors.
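To make two of the rows in Table 3.1 concrete, the short Python sketch below (ours; the discrete difference operators are one possible choice) evaluates the spatial-smoothness energy and a slightly smoothed total-variation energy for a given image; smooth images obtain low values of ρ(x), i.e., high prior probability.

    import numpy as np

    def gradients(x):
        # Horizontal (D_h) and vertical (D_v) forward differences
        dh = np.diff(x, axis=1, append=x[:, -1:])
        dv = np.diff(x, axis=0, append=x[-1:, :])
        return dh, dv

    def rho_smoothness(x):
        # "Spatial smoothness" row:  ||D_v x||_2^2 + ||D_h x||_2^2
        dh, dv = gradients(x)
        return float(np.sum(dh ** 2) + np.sum(dv ** 2))

    def rho_total_variation(x, eps=1e-12):
        # "Total-Variation" row:  1^T sqrt(|D_v x|^2 + |D_h x|^2)
        dh, dv = gradients(x)
        return float(np.sum(np.sqrt(dh ** 2 + dv ** 2 + eps)))

    ramp = np.tile(np.linspace(0.0, 1.0, 32), (32, 1))                  # a smooth image
    noisy = ramp + 0.1 * np.random.default_rng(1).standard_normal(ramp.shape)
    print(rho_smoothness(ramp) < rho_smoothness(noisy))                 # True
    print(rho_total_variation(ramp) < rho_total_variation(noisy))       # True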
3.3. Other Classical Denoisers. While the above-described Bayesian approach has proven to be quite productive, yielding a wide variety of denoising methods, alternative and more direct design techniques for such algorithms were also considered. Here we mention a few such methods, some relying on the general notion of spatially adaptive smoothing of image content, while others leverage the self-similarity that often exists in images.

Consider the following rough motivating idea. Recall that a denoiser should attenuate random i.i.d. Gaussian noise while preserving the image content. When operating on a noisy pixel y[i, j], our intuitive strategy is to open a neighborhood around it, Ω[i, j], for averaging purposes. If it so happens that the local image content in Ω[i, j] behaves like a tilted plane, a simple averaging of these neighborhood pixels would provide a perfect local noise suppression. When the local behavior deviates from this simple structure, the averaging mask should take this into account and adapt accordingly. This is exactly the idea behind the Bilateral filter [284, 87] and the Beltrami-Flow [255], in which the averaging weight takes into account two forces: (i) the proximity of the weighted pixel to the center of the neighborhood; and (ii) the proximity of this pixel's value to the center pixel's value, indicating its relevance to the averaging. Computing these weights for each pixel y[i, j] and normalizing them to sum to one creates the local averaging kernel to apply. This way, if Ω[i, j] covers an edge between two regions, averaging will be restricted to the "relevant" pixels while discarding others. Non-Local-Means [32] takes this approach one step further by widening Ω[i, j] to a semi-local region, and by assessing pixels' relevance to the averaging by patch-matching instead of scalar value comparisons. This way we keep the spatially adaptive averaging concept, but robustify it and make it non-local. Kernel-regression [271] is also a spatially adaptive averaging technique, but one that relies on a local parametric estimation of the pixels' gray-values in Ω[i, j]: a 2D Gaussian is fitted to the pixels in Ω[i, j], dictating its orientation and span, and this way offering smoothing along edges instead of across them.
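The bilateral weighting just described can be written in a few lines; the sketch below is our own illustrative (and deliberately slow, loop-based) Python version, with the window radius and the two Gaussian bandwidths chosen arbitrarily.

    import numpy as np

    def bilateral_denoise(y, radius=3, sigma_s=2.0, sigma_r=0.1):
        # Per-pixel weighted average combining spatial proximity and value proximity
        h, w = y.shape
        out = np.empty_like(y)
        ii, jj = np.mgrid[-radius:radius + 1, -radius:radius + 1]
        spatial = np.exp(-(ii ** 2 + jj ** 2) / (2.0 * sigma_s ** 2))          # force (i)
        pad = np.pad(y, radius, mode='reflect')
        for i in range(h):
            for j in range(w):
                patch = pad[i:i + 2 * radius + 1, j:j + 2 * radius + 1]
                value = np.exp(-(patch - y[i, j]) ** 2 / (2.0 * sigma_r ** 2))  # force (ii)
                weights = spatial * value
                out[i, j] = np.sum(weights * patch) / np.sum(weights)
        return out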
Another direct image denoising method that deserves our specific attention, especially due to its superior performance, is the BM3D algorithm [61]. This technique relies on the expectation that 2D-DCT transformed local patches in natural images are sparse. Furthermore, by gathering groups of similar patches from the overall image area, this transformed sparsity should align in support. Thus, BM3D builds a 3D cube of similar patches for each pixel y[i, j], transforms this cube together and forces a joint sparsity outcome. Among the classical denoising algorithms, BM3D is considered among the very best approaches in terms of MSE results. In this context, we also mention the Weighted Nuclear Norm Minimization (WNNM) denoising method [110] and its followups (e.g., [310]). These rely on a similar rationale to BM3D, but replace the joint sparsity by a low-rank assumption.
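To give a flavor of this grouping-and-joint-transform idea, here is a heavily simplified sketch of our own (it keeps only the first, hard-thresholding stage, uses a crude patch search, and skips the aggregation and Wiener refinement of the real BM3D); the patch size, search window, group size and threshold are arbitrary illustrative values.

    import numpy as np
    from scipy.fft import dctn, idctn

    def collaborative_filter(group, thr):
        # group: (K, p, p) stack of similar patches; threshold jointly in a 3D DCT domain
        coeffs = dctn(group, norm='ortho')        # 2D DCT per patch + 1D DCT across the stack
        coeffs[np.abs(coeffs) < thr] = 0.0        # hard thresholding enforces joint sparsity
        return idctn(coeffs, norm='ortho')

    def bm3d_like_pass(y, patch=8, search=12, k=16, thr=0.08):
        h, w = y.shape
        out = y.copy()
        for i in range(0, h - patch + 1, patch):
            for j in range(0, w - patch + 1, patch):
                ref = y[i:i + patch, j:j + patch]
                cands, dists = [], []
                for ii in range(max(0, i - search), min(h - patch, i + search) + 1, 2):
                    for jj in range(max(0, j - search), min(w - patch, j + search) + 1, 2):
                        p = y[ii:ii + patch, jj:jj + patch]
                        cands.append(p)
                        dists.append(np.sum((p - ref) ** 2))
                group = np.stack([cands[t] for t in np.argsort(dists)[:k]])
                filtered = collaborative_filter(group, thr)
                out[i:i + patch, j:j + patch] = filtered[0]   # the closest match is the reference patch
        return out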
3.4. Is denoising dead? The paper "Is Denoising Dead?", published in 2009 by Chatterjee and Milanfar [45], exposed a disturbing feeling shared by many in our community at the time – a suspicion that we were touching the ceiling in terms of denoising ability. This impression relied on the considerable progress in the design of denoising algorithms during the preceding years, and on the fact that very different approaches towards this problem were found to lead to comparable denoising performance. A follow-up work [162, 163] by Levin and Nadler in 2011-2012 addressed the same question. Both lines of work suggested a derivation of an approximate lower bound on the attainable MSE for noise removal. Without diving into the specifics of their derivations, we should mention that both concluded that there is still room for some improvement, even though this claim was not made constructively, leaving the question of how to obtain better techniques vague at best.

From a practical point of view, and despite these optimistic conclusions, the progress in denoising performance after 2010-2011 was very slow and of diminishing returns. Indeed, the graph in Figure 2.1 shows a decrease in the number of papers on image denoising around 2010. However, this setback held true mostly for classically oriented methods of the kind discussed above. The emergence of deep neural networks brought a massive change to our domain, shattering the common belief about the end of this field, and the folklore around the attained performance limit. Indeed, deep learning brought new ways for the design of highly effective image denoisers, taking the lead in today's ability for noise suppression in images. However, the AI revolution had a much wider impact on the image denoising task, opening new horizons to possibilities and abilities never dealt with before. Among many such directions, these include (i) image adaptation; (ii) true noise removal; and (iii) addressing new denoising objectives. In the following section we discuss all of these in much greater detail.

While the past decade can certainly be titled the era of the AI revolution, there has been another revolution, perhaps of a bigger scale, that took place in parallel in the field of image processing – one that refers to the discovery that an image denoiser can serve other tasks. From the seminal paper on Plug-and-Play Priors [295], through the Regularization by Denoising paper [231], and all the way to the recent and exciting diffusion-based image synthesis [260, 120], image denoisers are taking on a new and much more exciting role in image processing. As this is the main theme of this paper, we shall expand on this line of work in Section 6 and after. So, to summarize, for the question 'is denoising dead?' our answer is 'definitely not!', and this is due to the vast influence of deep learning, and other new directions that brought new life to this domain. The rest of this paper is dedicated to the description of these developments and their impact and prospects.

4. Image Denoising – The Deep Learning Revolution. The recently discovered ability to effectively train deep neural networks for classification, regression and other tasks should not be taken lightly. Nothing in this process is well-understood or well-justified.
Indeed, the opposite is true – with overparametrized networks and a highly non-convex objective function, it is quite surprising that such networks are able to learn and generalize at all. And yet they do! This is the essence of the AI revolution that has found its way to so many fields, impacting each in a profound way. Image processing and computational imaging are yet another playground that has been deeply influenced by this AI revolution. Today's practice and theory in image processing are entirely different from the ones considered only 10 years ago. Indeed, image processing undergraduate and graduate courses had to change dramatically due to these new winds of change.

And all this brings us to the new era of image denoising. In Section 3 we asked how image denoisers should be designed, and gave an answer that relies on the classical Bayesian approach. We now return to this question, and provide an entirely different answer – one that builds on supervised deep learning. This approach takes the following steps (a compact code sketch is given below):

1. Start by gathering a large⁶ dataset of clean images of diverse content – the kind we aim to denoise. We shall denote this set as X = {x_k}, k = 1, ..., M. For simplicity, assume that all images are of the same size. If this is not the case, an easy process of random tile extraction may convert the given data into this desired structure.

2. Recall that our goal is the design of a denoiser that removes additive white Gaussian noise of a specific strength σ. Thus, the next step is to create noisy instances of X, i.e., Y = {y_k}, k = 1, ..., M, where y_k = x_k + v_k and v_k ∼ N(0, σ²I) for 1 ≤ k ≤ M. In fact, every example x_k could be contaminated by several noise realizations, this way enriching the training set.

3. Define a parametric denoising architecture ˆx = D_Θ(y, σ) that should be trained to perform the denoising task. This stage is necessarily vague, as there are many options for constructing such an architecture, and there seem to be no clear guidelines for its structure. Indeed, the literature offers various such options conceived by trial-and-error. More details and a discussion of this delicate stage are given below.

4. Define the training loss – a penalty function that exploits the availability of X and Y and the defined parametric denoiser D_Θ(y, σ), posing a cost value to be minimized with respect to Θ and encouraging the denoised images to be close to their corresponding ideal ones. Such a functional could be of the form

    L(Θ) = Σ_{k=1}^{M} dist(x_k, ˆx_k) = Σ_{k=1}^{M} dist(x_k, D_Θ(y_k, σ)),    (4.1)

where dist(x, ˆx) is a distance function between the ideal and the denoised image, such as the MSE, dist(x, ˆx) = ∥x − ˆx∥₂².

5. Minimize L(Θ) with respect to Θ via stochastic gradient descent [24] applied on small batches of training pairs (x_k, y_k), exploiting back-propagation [242].

⁶ By 'large' we mean thousands and sometimes millions of images, and the more the better. Often, the training itself may rely on several hundred images, and these are augmented by randomized operations such as crop, scale-down, rotations, and more.

Once all the above steps are completed, the denoiser ˆx = D_Θ(y, σ) is ready to be deployed on newly incoming images, expected to perform better or worse in noise removal depending on the size and quality of the training set, the similarity between the image to be denoised and the training set, the chosen architecture, and the quality and hyperparameters of the optimization process.

A variant of the above is blind denoising, in which σ is unknown. The straightforward approach towards this task is brute-force learning. This means that for every ideal image x_k we produce a sequence of noisy versions y_k^σ with varying values of σ in the range we aim to cover. Then learning is done by minimizing a loss that integrates over all the noise levels,

    L(Θ) = Σ_σ Σ_{k=1}^{M} dist(x_k, D_Θ(y_k^σ)).    (4.2)

Observe that in this case the denoiser D_Θ gets only the noisy image, without σ. An interesting alternative to the above was discovered in [201], showing that a bias-free architecture becomes robust to the noise power, and thus simple training for a single value of σ generalizes well to other levels of noise.
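The following PyTorch sketch is one hypothetical instantiation of steps 1-5 (ours, not code from the paper): a small DnCNN-style residual CNN trained with the MSE loss of Equation (4.1). Here clean_batches stands in for whatever loader supplies batches of clean training tiles, the architecture and optimizer settings are illustrative, and for simplicity σ is fixed and not fed to the network.

    import torch
    import torch.nn as nn

    class SmallDenoiser(nn.Module):
        # A shallow DnCNN-flavored network that predicts the noise residual
        def __init__(self, channels=1, width=64, depth=6):
            super().__init__()
            layers = [nn.Conv2d(channels, width, 3, padding=1), nn.ReLU(inplace=True)]
            for _ in range(depth - 2):
                layers += [nn.Conv2d(width, width, 3, padding=1), nn.ReLU(inplace=True)]
            layers.append(nn.Conv2d(width, channels, 3, padding=1))
            self.body = nn.Sequential(*layers)

        def forward(self, y):
            return y - self.body(y)                   # residual learning: x_hat = y - estimated noise

    def train_denoiser(clean_batches, sigma=25.0 / 255.0, epochs=50, lr=1e-3):
        model = SmallDenoiser()
        opt = torch.optim.Adam(model.parameters(), lr=lr)
        for _ in range(epochs):
            for x in clean_batches():                 # step 1: clean tiles, shape (B, 1, H, W)
                y = x + sigma * torch.randn_like(x)   # step 2: synthesize the noisy pair
                loss = ((model(y) - x) ** 2).mean()   # steps 3-4: D_Theta and the MSE of Eq. (4.1)
                opt.zero_grad()
                loss.backward()                       # step 5: back-propagation
                opt.step()                            #         and a stochastic gradient step
        return model

    # For the blind variant of Eq. (4.2), draw sigma anew for every batch, e.g.
    # sigma = torch.empty(1).uniform_(5.0 / 255.0, 50.0 / 255.0).item()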
An amazing consequence of all the above is this: all the glorious work on image priors that fueled the design of classical denoisers and other tools in image processing seems to have become totally obsolete. Observe that in this supervised deep learning approach we have no need nor room for all the knowledge and know-how that have been accumulated carefully over decades of extensive research and engineering work. Is this a fair description of the current state of things in our field? To a large extent, the sad answer is positive, while some reservations to this conclusive statement will be discussed in Section 5.

The emergence of deep learning techniques and their new abilities brought a new evolution of ideas on the design and span of image denoisers. While this literature is vast and rich, we describe below several key trends in this progress, in an attempt to expose both the new abilities obtained and the new ideas accompanying them. These come on several fronts:

Better Denoisers: Improving image denoising capabilities via deep learning became a natural new front, where the aim is to perform better in terms of Peak Signal-to-Noise Ratio (PSNR) on an agreed-upon corpus of test images. This is manifested by an evolution of architectures that started with simple feed-forward Convolutional Neural Networks (CNN) [330], proceeded to more advanced structures, such as the UNet [234, 115], and all the way to the recently introduced Transformers [78, 165, 322]. In Figure 4.1 we illustrate this trend by presenting a graph that shows the progress in denoising PSNR on the well-known BSD68 dataset [190]. More details on each of these algorithms are provided in Appendix C.
Figure 4.1: Denoising performance on the BSD68 dataset [190] with σ = 25 (K-SVD [89], BM3D [61], FoE [237], LSSC [181], EPLL [347], MLP [33], CSF [252], WNNM [110], TNRD [50], DnCNN [330], IRCNN [331], NLRN [167], MVCNN [168], N3Net [218], FFDNet [332], FOCNet [133], RIDNet [8], GCDN [292], SwinIR [165], DRUNet [329]). (The plot shows PSNR, roughly 27.5 to 29.5 dB, against publication year, 2006-2020; the graph itself is omitted here.)

Different Training Schemes: We described above the most obvious, supervised, training strategy, where we gather pairs of ideal images and their noisy versions. Various unsupervised alternatives have also been developed for this task, such as Noise2Noise [160], Noise2Void [152], Noise2Self [16], SURE-based denoising [337, 175, 207], and others, all aiming to operate on noisy images directly, without the need for access to their clean versions. It should be clear, though, that these techniques become relevant only in cases where the noise does not follow a known analytic structure, as otherwise the supervised alternative would be preferred. Another appealing approach that adopts an unsupervised denoiser training is the "Deep Image Prior" (DIP) [286], where a network is trained on a single image to best fit itself. An early stopping of this learning is shown to yield an effective denoising, revealing the regularization capabilities of the UNet architecture.
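A bare-bones sketch of the DIP idea just mentioned (ours; the real method uses a much larger UNet and specific hyperparameters): a small network is fitted to reproduce the single noisy image y from a fixed random input, and the optimization is stopped early, before the network manages to reproduce the noise as well.

    import torch
    import torch.nn as nn

    def dip_denoise(y, steps=1200, lr=1e-2):
        # y: noisy image tensor of shape (1, 1, H, W); early stopping acts as the regularizer
        net = nn.Sequential(                      # a deliberately tiny stand-in for the DIP UNet
            nn.Conv2d(8, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 1, 3, padding=1),
        )
        z = torch.randn(1, 8, y.shape[-2], y.shape[-1])   # fixed random input, never updated
        opt = torch.optim.Adam(net.parameters(), lr=lr)
        for _ in range(steps):                    # stop well before the net can memorize the noise
            loss = ((net(z) - y) ** 2).mean()
            opt.zero_grad()
            loss.backward()
            opt.step()
        with torch.no_grad():
            return net(z)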
This ability becomes crucial when operating on images with un-modeled and unknown noise statistics. In such cases, learning should rely on more fundamental forces, such as self-similarity in images, the slow tendency of regressed neural networks to recreate noise from noise, the joint information that exists in bursts of frames, and more. More broadly speaking, removal of true noise from images is a relatively new topic in image denoising, as it has hardly been addressed in the classical era due to its evident complexity. With advanced self-supervised and unsupervised learning techniques, new impressive abilities were created [298, 149, 170, 285, 301, 128].

Image adaptation: This refers to the ability to take an already designed/trained denoiser and adapt it to perform better on unique images that deviate from the training set. This way, general-purpose denoisers could be boosted when operating on scanned documents, astronomical images, cartoon images and more. The adaptation itself could be done in various ways, the most natural of these being the following [289]: Given a noisy yet unique image to be cleaned, apply first the available denoiser DΘ0 and obtain x̂0 = DΘ0(y, σ). Now retrain the denoiser (i.e., update the parameters Θ) by minimizing dist(x̂0, DΘ(y, σ)). Similar to the core idea behind Noise2Noise [160] and DIP [286], a few gradient steps of this minimization are expected to go in the proper direction and yield a more informative and relevant denoiser, thus boosting the result for this specific image. The final outcome is obtained by x̂ = DΘ(y, σ), using the slightly updated parameters Θ.
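To make the adaptation recipe above concrete, here is a minimal PyTorch-style sketch of the procedure; the denoiser interface denoiser(y, sigma), the number of gradient steps, and the learning rate are illustrative assumptions rather than the exact settings of [289].

```python
import torch
import torch.nn.functional as F

def adapt_denoiser(denoiser, y, sigma, steps=20, lr=1e-5):
    """Single-image adaptation of a pretrained denoiser (in the spirit of [289]).
    denoiser: pretrained torch.nn.Module with the (assumed) interface denoiser(y, sigma)
    y: the noisy image to adapt to, shape (1, C, H, W); sigma: its noise level."""
    denoiser.eval()
    with torch.no_grad():
        x0 = denoiser(y, sigma)                    # first pass: x_hat_0 = D_{Theta_0}(y, sigma)

    opt = torch.optim.Adam(denoiser.parameters(), lr=lr)
    denoiser.train()
    for _ in range(steps):                         # only a few steps, as in Noise2Noise / DIP
        opt.zero_grad()
        loss = F.mse_loss(denoiser(y, sigma), x0)  # dist(x_hat_0, D_Theta(y, sigma))
        loss.backward()
        opt.step()

    denoiser.eval()
    with torch.no_grad():
        return denoiser(y, sigma)                  # final estimate with the slightly updated Theta
```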
Addressing Different Objectives: When describing the supervised learning strategy of denoisers, we offered the L2 loss that considers PSNR performance. Over the years this quality measure took the lead in most papers, despite its known weaknesses. Indeed, our community has been constantly striving to get the MMSE denoiser, if not in body, then at least in spirit, and this is evident from the PSNR performance tables that appear in almost every paper on image denoising published over the years. As we argue later on in Section 9.1, while MMSE denoisers are of great value by themselves, their outcome is not necessarily visually appealing, being an average over many potential solutions. Bearing this in mind, the learning paradigm creates a new opportunity for serving "new masters" – recall that the learning loss function is highly non-convex, and yet we have no fear of its complexity when training the neural networks. Thus, we can easily replace the pleasant L2 by more sophisticated or adequate penalties. The immediate alternative that comes to mind is SSIM [299], which offers a more robust distance measure between images by considering structural similarity. We could go further and consider perceptual losses such as LPIPS [335], which is further robustified by a learned representation in order to fairly assess proximity between images. This trend can be characterized as an attempt to produce visually pleasing and crisp images from the denoisers, ones that will surpass the MMSE alternative. A step forward in this direction takes us to Generative Adversarial Networks (GANs) for denoising [69, 67, 212]. The idea is to challenge the output of the denoiser by feeding it into a classifier that should tell apart true images from denoised ones. By leveraging this classifier's guidance, the denoiser can learn to produce better looking images.
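As a concrete (and hedged) illustration of swapping the training objective, the following sketch mixes the pixel-wise L2 term with a generic perceptual term; perceptual_distance is a placeholder for any differentiable measure in the spirit of SSIM [299] or LPIPS [335], not a specific library API.

```python
import torch.nn.functional as F

def denoising_loss(x_clean, x_denoised, perceptual_distance=None, alpha=0.1):
    """Supervised denoising loss: L2 (PSNR-oriented) plus an optional perceptual term.
    perceptual_distance: any differentiable image distance (an SSIM- or LPIPS-style
    module); None recovers the plain MSE objective. alpha is an illustrative weight."""
    loss = F.mse_loss(x_denoised, x_clean)
    if perceptual_distance is not None:
        loss = loss + alpha * perceptual_distance(x_denoised, x_clean)
    return loss
```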
We will come back to this adversarial idea in Section 9.1, offering an improved approach that targets perfect perceptual quality results.

The description given above provides nothing but a glimpse into a very vibrant and rich body of literature that finds image denoising an appealing playground for research. Still, we stop the survey of deep learning for denoising here, as our prime goal is the denoisers themselves and the algorithms building on top of them. As one final note, observe that all the preceding discussion on classical and modern denoisers' design is given without referring to color images. Indeed, the formulation in this paper considers a grayscale image x, yet most denoisers, old and new, are typically required to process color (Red-Green-Blue) images. Some of the existing methods discussed above are easily extended to color by operating on the three chroma channels jointly. For example, NLM [32] and K-SVD denoising [89] operate on RGB patches directly by flattening them to longer vectors. Another approach is to turn to the YUV or YCbCr color-space, and operate on the luma (Y) and the chroma (Cb/Cr or U/V) layers independently, as BM3D does [61]. Denoisers based on deep neural networks typically handle color directly by feeding the RGB image as a 3-dimensional tensor input to the network, processed by subsequent 3D convolutions. More intricate approaches do exist, in which the geometrical interplay between the color layers is taken into account more adequately [255].

5. Synergy between Classics and Deep Learning. With the description given above, the reader may (rightfully!) get the impression that the vast knowledge regarding image denoising gathered during the classical era has become obsolete with the emergence of the deep learning alternatives. However, this claim is not entirely correct. In reality, the themes investigated and promoted by classical algorithms serve as the foundations for building DL denoisers, even if practiced implicitly, and these are mostly manifested by the choice of architectures to be used. To illustrate this, we mention several well-known key concepts of classical image denoising algorithms, and show their impact on DL architectures:

Locality: Most information relevant to restoring a pixel's value in denoising is contained in its local neighborhood. In classical algorithms, this concept is embodied using patch processing, local filtering, local image priors, and more. When it comes to DL schemes, many denoisers choose convolutional layers as their primary processing path, which leads to architectures with small to moderate receptive fields [308, 330, 332].

Sparsity under appropriate transforms: Local image patches are expected to be sparse when represented using certain 2D transforms. On the classical side, several of the priors listed in Table 3.1 fall into the sparsity-promoting category. On the DL side, a similar treatment can be observed, where the commonly used ReLU activation promotes sparsity by nulling the negatively activated neurons [101].

Self-similarity: Most image patches have similar twins at other locations in the same image. While classical algorithms usually harness this property by gathering similar patches and processing them jointly, some recent DL schemes leverage self-similarity using self-attention layers [165, 317].
Unfortunately, these and other concepts inherited from the classical era do not provide a constructive answer to the main question DL faces: How should we choose the appropriate architecture for the denoising task? Researchers facing this question usually take one of the two following options: (i) Copy: adoption of an existing architecture that has been demonstrated to lead to good results in a similar task (e.g., DnCNN, UNet, ResNet, and more) [330, 234, 117]. Usually such an adoption is accompanied by some minor modifications, such as changing the number of channels or layers, etc.; or (ii) Trial and error: gathering an architecture by piling a mix of known building blocks such as convolutions, strides, batch normalization steps, ReLU, fully-connected layers, down- and up-scaling, skip-connections, and more. Both these options seem to work rather well, leading to networks achieving very good practical results – see [330, 332, 165]. However, this brute-force approach typically tends to end up with heavy and cumbersome architectures, relying on millions of trainable parameters, making the resulting networks expensive to use and hard to train. Another downside of such architectures is their lack of explainability. While this may seem unimportant, having a black-box denoiser with no explainability implies an inability to leverage it for other tasks (e.g., image separation [199, 91, 153]), or to probe it for identifying origins of failures in ill-treated regions of the image. More broadly, the brute-force approach towards architecture design for denoisers may require a lengthy trial-and-error process and may end up hitting a performance barrier.

An alternative to copying or guessing architectures does appear in recent literature, known as unfolding [109, 341, 250, 83, 204]. This approach constructs the neural network so as to mimic the computational stages of a well-motivated algorithm. The term unfolding has to do with the fact that many classical image denoising methods involve iterative algorithms, and thus networks mimicking these should unfold their iterations to a feed-forward computational path. This approach typically produces concise and perfectly explainable networks, both in terms of the learned parameters and the activations obtained, which are easier to train. In addition, such networks tend to be easily and effectively adapted to different data. There are various examples in the literature of the unfolding approach for various regression tasks, e.g., [313, 58, 328, 125, 289, 250, 204]. Here we briefly describe two such methods for illustrative purposes: Deep K-SVD [250] and LIDIA [289]. Both propose a conversion of a classical denoising algorithm into a deep neural network architecture.

5.1. Deep K-SVD. Deep K-SVD [250] is an unfolding version of the K-SVD image denoising algorithm [89]. We start with a brief explanation of the original K-SVD method, and then turn to describe its unfolding. K-SVD denoising is based on sparse representation theory for constructing the image prior [4]. Consider a clean image x and patch extraction operators {R_k}_k such that R_k x ∈ R^n are image patches of size √n × √n taken from location k in the image. The sparsity-promoting prior assumes that any such patch, R_k x, can be represented as a linear combination of few columns (also referred to as atoms) from a redundant dictionary D ∈ R^{n×p} (redundancy implied by p > n), i.e.,

(5.1)    $R_k x = D \alpha_k$,

where α_k ∈ R^p is a sparse vector, ∥α_k∥_0 ≪ n. Armed with this assumption, K-SVD poses the following minimization problem:

(5.2)    $\min_{\{\alpha_k\}_k,\,x}\ \frac{\mu}{2}\|x - y\|_2^2 + \sum_k \Big(\lambda_k \|\alpha_k\|_0 + \frac{1}{2}\|D\alpha_k - R_k x\|_2^2\Big)$,

where y is the given noisy image, and µ and λ_k are hyper-parameters. In this expression, the first term is the log-likelihood, requiring proximity between the reconstructed image x and the noisy image y. The second and third terms represent the sparse representation prior, demanding that every image patch R_k x in every location k has an approximate sparse representation α_k. The K-SVD algorithm solves this minimization problem by applying the following two steps iteratively: (i) Fix x (initialized by x = y) and update the vectors {α_k}_k; and (ii) Update x while freezing the sparse representation vectors. The first is referred to as the sparse coding stage, where each patch in the contemporary solution obtains a sparse representation via the Orthogonal Matching Pursuit (OMP) greedy algorithm [214]. The second step becomes a quadratic minimization task, its solution being a simple variation of patch-based averaging. A single round of the above two steps has been shown to suffice for getting very good results [89], and a repetition of this round several times could further boost the results [347]. The dictionary D in the above process could be either universal – pretrained to best sparsify natural image content – or image adaptive – updated to the image y itself within the above optimization.

We now turn to describe the Deep K-SVD algorithm, which adopts the universal dictionary approach. The end-to-end architecture referring to a single round is illustrated in Figure 5.1.

Figure 5.1: End-to-end architecture of the Deep K-SVD network [250] (block labels in the figure: Patch Decomposition, λ Evaluation, Sparse Coding, Patch Reconstruction, Patch Denoiser, Patch Averaging).

This neural network consists of three main blocks: Patch Decomposition, Patch Denoiser, and Patch Averaging, all following closely the very same steps described above, with appropriate adaptations. Patch Decomposition breaks the input image y into a set of fully overlapped patches {z_k}_k = {R_k y}_k. The next block, Patch Denoiser, is applied patch-wise, but replaces the OMP by LISTA [109], in which z_k undergoes sparse coding via a differentiable shrinkage-based iterative algorithm [109]. These inner iterations are unfolded as well to create a feed-forward computational path that starts with z_k and ends with ẑ_k = Dα_k. Due to the gap between OMP and LISTA, a sub-network of fully-connected layers computes the value of λ_k for the incoming patch z_k. The last block, Patch Averaging, rebuilds the reconstructed image x̂ by averaging the cleaned patches ẑ_k using learned weights. This Deep K-SVD network is trained end-to-end by minimizing the MSE distance between the ideal and denoised images for a set of M training images,

(5.3)    $L(\Theta) = \sum_{k=1}^{M} \|x_k - \hat{x}_k\|_2^2 = \sum_{k=1}^{M} \|x_k - D_\Theta(y_k)\|_2^2$,

where {x_k, y_k}_k is a set of clean and noisy image pairs to train on. D_Θ is the denoising network, where Θ stands for all trainable parameters, consisting of the dictionary D, the parameters of the sub-network that evaluates λ_k, and the shrinkage thresholds. Despite the close resemblance between the original algorithm and its unfolded version, the latter performs much better [250], surpassing classical methods and aligning with deep-learning based techniques. This should not come as a surprise, as the unfolded denoiser D_Θ is trained in a supervised manner, being fully aware of the task it serves, whereas the original algorithm relied on a "guessed" image prior. Interestingly, the universal dictionary obtained for D_Θ is markedly different from the one trained off-line for the original K-SVD denoising method, again a testimony to the major difference between the two design strategies.
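For concreteness, the sketch below shows the kind of unfolded, LISTA-style patch denoiser that sits at the heart of this design; the dictionary size, the number of unfolded iterations, the soft-threshold nonlinearity and the way λ_k enters are illustrative assumptions rather than the exact Deep K-SVD [250] configuration.

```python
import torch
import torch.nn as nn

class LISTAPatchDenoiser(nn.Module):
    """Unfolded (LISTA-style) sparse coding of image patches, in the spirit of the
    Patch Denoiser block of Deep K-SVD [250]: z_k -> alpha_k -> z_hat_k = D alpha_k.
    Sizes and the number of unfolded iterations are illustrative."""

    def __init__(self, n=64, p=256, n_iters=7):
        super().__init__()
        self.D = nn.Parameter(0.1 * torch.randn(n, p))    # learned dictionary, n x p (p > n)
        self.theta = nn.Parameter(0.05 * torch.ones(p))   # learned shrinkage thresholds
        self.step = nn.Parameter(torch.tensor(0.1))       # learned gradient step size
        self.n_iters = n_iters

    def soft_threshold(self, a, lam):
        return torch.sign(a) * torch.clamp(a.abs() - lam, min=0.0)

    def forward(self, z, lam_k):
        """z: (batch, n) noisy patches; lam_k: (batch, 1) per-patch regularization
        weight, predicted by a small fully-connected sub-network (not shown here)."""
        alpha = torch.zeros(z.shape[0], self.D.shape[1], device=z.device)
        for _ in range(self.n_iters):                     # unfolded shrinkage iterations
            residual = alpha @ self.D.t() - z             # D alpha - z, shape (batch, n)
            alpha = self.soft_threshold(alpha - self.step * (residual @ self.D),
                                        lam_k * self.theta)
        return alpha @ self.D.t()                         # z_hat = D alpha
```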
5.2. LIDIA – Lightweight Learned Image Denoising. Another example of unfolding-based denoising is LIDIA [289], which mimics the computational stages of BM3D [61]. As already mentioned in Section 3.3, BM3D harnesses two prime forces for its denoising goal – sparsity and self-similarity. The first relies on the assumption that local image patches are sparse under the 2D-DCT spatial transform; the latter is reflected by operating on groups of similar patches jointly, forcing sparsity again by transforming across these patches.

Figure 5.2: The LIDIA denoising computational path (block labels in the figure: Patch Decomposition, Nearest Neighbor Search, Filtering Network, Patch Combining).

Figure 5.3: The Transform-ReLU-Transform block (T1 Zk T2 → ReLU → T3 · T4). Applying the matrices T1 and T2 transforms the input, Zk, to a space in which patches are supposed to be sparse; the matrices T3 and T4 transform the outcome to the pixel domain. Observe that the transform applied on Zk is separable – T1 is applied within patches while T2 operates across. This enables a reduction of the size of the matrices T1, ..., T4 in order to enable their training.

LIDIA's core computational path is shown schematically in Figure 5.2. This neural network starts by breaking the input image y into a set of fully overlapping patches {z_k}_k of size √n × √n. Then, each patch, z_k ∈ R^n, is augmented with a group of its m − 1 nearest neighbors, forming a matrix Z_k of size n × m. The filtering is applied patch-wise – each matrix Z_k undergoes a series of blocks composed of a separable transform, ReLU, and another separable transform, as shown schematically in Figure 5.3. This mimics the BM3D operation by transforming the input matrix to a space in which local patches are believed to be sparse, forcing sparsity using the ReLU layer, and transforming the outcome back to the pixel domain. Unlike BM3D, the transforms are trainable and are not restricted to be the inverse of each other, nor forced to be square matrices. In addition, LIDIA includes a multi-scale treatment, simultaneously processing patches in several scales. During processing, the corresponding patches from different scales are fused using a learned joint transform. Finally, the reconstructed image is obtained by returning the denoised patches to their original places while averaging overlaps using learned weights. The LIDIA network is trained end-to-end (excluding the nearest-neighbor part) by minimizing the MSE loss, similar to the loss in Equation (5.3), applied on a set of M training images. The network can be trained for a specific noise level σ or blindly, aiming to serve a range of σ values.
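A minimal sketch of such a separable Transform-ReLU-Transform block is given below; all dimensions, initializations, and the single-block scope (no multi-scale fusion, nearest-neighbor search or patch averaging) are illustrative assumptions, not the actual LIDIA [289] implementation.

```python
import torch
import torch.nn as nn

class TRTBlock(nn.Module):
    """Separable Transform-ReLU-Transform block (cf. Figure 5.3): T1 acts within each
    patch, T2 across the group of neighbors; T3 and T4 map back toward the pixel domain.
    All dimensions are illustrative."""

    def __init__(self, n=36, m=14, s=36, t=14):
        super().__init__()
        self.T1 = nn.Parameter(torch.randn(n, s) / n ** 0.5)   # within-patch transform
        self.T2 = nn.Parameter(torch.randn(m, t) / m ** 0.5)   # across-patch transform
        self.T3 = nn.Parameter(torch.randn(s, n) / s ** 0.5)
        self.T4 = nn.Parameter(torch.randn(t, m) / t ** 0.5)

    def forward(self, Z):
        """Z: (batch, n, m) -- a patch and its m-1 nearest neighbors, column-stacked."""
        W = torch.relu(self.T1.t() @ Z @ self.T2)   # transform to a "sparse" domain, ReLU
        return self.T3.t() @ W @ self.T4            # transform back toward the pixel domain
```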
LIDIA performs much better than the original BM3D algorithm, since it uses learned rather than fixed transforms. Compared with other deep-learning techniques, LIDIA achieves comparable results while using a small fraction of the typical number of learned parameters.

In Section 4 we mentioned the ability to adapt a given denoiser to newly arriving images that deviate from the training set. This adaptation starts by applying the trained denoiser, and then uses the output in order to fine-tune the denoiser parameters by applying a few gradient steps. This rationale has been successfully demonstrated with LIDIA, and two such illustrative results are brought in Figure 5.4.

Figure 5.4: Image adaptation via LIDIA (panels: clean, noisy with σ = 50, denoised at 24.22dB and 22.10dB, adapted at 26.34dB and 25.82dB): The original denoising network is trained for general content images, and performs reasonably well for astronomy and scanned document inputs. A substantial boost in denoising performance is obtained for these two examples, due to their deviation from the training set.

5.3. Summary – The classics are still here. We described two unfolding instances in which classic denoising algorithms provide their architecture for the learned network. These and other such methods [341, 250, 83, 204], targeting various image recovery tasks, offer a constructive path towards well-motivated, low-complexity and explainable neural architectures. In the quest for a synergy between classical denoising methods and novel deep-learning alternatives, this is probably the most natural manifestation of it.

6. Image Denoising – Migration towards Recent Discoveries. The clear conclusions from the above discussion are these: Highly effective image denoisers for AWGN removal, D(y, σ), are definitely within reach, and the better ones are likely to be deep-learning based algorithms. In an attempt to illustrate these statements, Figures 6.1 and 6.2 present denoising results for two test images, two noise levels (σ = 15, 50), and several denoisers – NLM [32], BM3D [61], DnCNN [330], and SwinIR (a transformer-based denoising network) [165]. As can be seen, the results are very impressive, and more so for the more recent deep neural network solutions. We now turn to ask far more daring questions with regard to such denoisers, focusing this time on their deployment to other tasks. More specifically, we discuss three such questions, each corresponding to a recent discovery in the field of imaging sciences:

Discovery 1: Can we leverage a denoiser D(y, σ) for solving general linear inverse problems? As we shall shortly see, the answer to this question is positive and constructive, opening new horizons for the design of recovery algorithms and their regularization.

Discovery 2: Can we leverage a denoiser D(y, σ) for synthesizing (hallucinating) high-quality images, fairly drawn from the prior probability density function p(x)? Here again the answer is positive and constructive, offering a thrilling new line of activity in machine learning.

Discovery 3: If hallucination of perfect-looking images is achievable, can we revisit the topic of general linear inverse problems and leverage a denoiser D(y, σ) for their solution while targeting perfect perceptual quality results? Here again we give a positive answer, leading to a new and inspiring branch of research in inverse problems, offering a novel view of their treatment.

Figure 6.1: Demonstration (1) of several denoising methods (NLM [32], BM3D [61], DnCNN [330], SwinIR [165]); panels: clean, noisy with σ = 50, NLM at 24.67dB, BM3D at 26.31dB, DnCNN at 26.70dB, SwinIR at 27.31dB.

Below we discuss each of these discoveries in greater detail. It is our sincere belief that these together form one of the most exciting eras for our field, marking a major transition in how image processing is perceived and practiced.

7. Discovery 1: Solving Inverse Problems via Image Denoisers.
Given a denoiser D(y, σ) : R^N → R^N, our goal is to use it somehow for solving general linear inverse problems of the form

(7.1)    $y = Hx + v$,

where H ∈ R^{M×N} is a known matrix, v ∈ R^M is AWGN, and y ∈ R^M is the given measurement vector. Observe that H = I stands for the denoising problem. Therefore, the current discussion extends our view to a wider family of tasks in imaging sciences, covering applications such as deblurring, inpainting, demosaicing, super-resolution, tomographic reconstruction, compressed sensing, and more.

Figure 6.2: Demonstration (2) of several denoising methods (NLM [32], BM3D [61], DnCNN [330], SwinIR [165]); panels: clean, noisy with σ = 15, NLM at 33.82dB, BM3D at 36.23dB, DnCNN at 36.33dB, SwinIR at 37.17dB.

Following the derivations in Section 3, and specifically Equation (3.3), we can adopt the Bayesian point of view and obtain the MAP estimation for this family of problems:

(7.2)    $\hat{x}_{MAP} = \arg\min_{x} \left\{ \frac{\|Hx - y\|_2^2}{2\sigma^2} - \log p(x) \right\}$.

Plugging in the Gibbs distribution form for the prior, p(x) ∼ exp{−ρ(x)}, this becomes

(7.3)    $\hat{x}_{MAP} = \arg\min_{x} \left\{ \|Hx - y\|_2^2 + c \cdot \rho(x) \right\}$.
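To ground the abstract operator H, here is a small NumPy sketch of one concrete instance from the list above (inpainting, where H simply selects the observed pixels) together with the data-fidelity term of (7.2); it is purely illustrative, and H = I recovers plain denoising.

```python
import numpy as np

def inpainting_operator(mask):
    """H for inpainting: a row-selection matrix keeping only the observed pixels.
    mask: boolean array of length N, True at observed pixel locations."""
    return np.eye(mask.size)[mask]          # shape (M, N) with M = mask.sum()

def data_fidelity(x, y, H, sigma):
    """The log-likelihood term of (7.2): ||Hx - y||_2^2 / (2 sigma^2)."""
    r = H @ x - y
    return float(r @ r) / (2.0 * sigma ** 2)

# Example: a tiny 1-D "image" with some of its pixels missing.
rng = np.random.default_rng(0)
x_true = rng.standard_normal(8)
mask = np.array([True, False, True, True, False, True, False, True])
H = inpainting_operator(mask)
y = H @ x_true + 0.1 * rng.standard_normal(mask.sum())
print(data_fidelity(x_true, y, H, sigma=0.1))
```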
Clearly, the greatest riddle posed above has to do with the identity of the energy function ρ(x). Can a denoiser serve all linear inverse problems in a unified approach by providing a connection or an alternative to ρ(x)? Surprisingly, the answer to this question is positive and constructive. The seminal Plug-and-Play Prior (PnP) work by Venkatakrishnan, Bouman and Wohlberg [295] was the first to provide such an answer^7, followed and improved upon by RED (Regularization by Denoising) [231]. These and their various extensions and variations have created a vivid and stimulating sub-field of research in imaging sciences [28, 139, 283, 34, 268, 41, 192, 280, 5, 49, 55] in which denoisers play a central role. Below we describe PnP and RED in more detail, and then turn to describe another, perhaps better founded, bridge between denoisers and the energy function ρ(x) via the score function. This would serve our next step towards diffusion models, as they unravel in Section 8 and beyond.

7.1. Plug-and-Play Prior (PnP). PnP [295] suggests the following steps in handling the minimization of the problem posed in Equation (7.3): We start by splitting the variable x by defining z = x and expressing each of the two penalties with a different variable:

(7.4)    $\hat{x}_{MAP} = \arg\min_{x,z} \left\{ \|Hx - y\|_2^2 + c \cdot \rho(z) \right\} \quad \text{s.t.} \quad z = x$.

The next step forms the Augmented Lagrangian of the above problem, converting the constraint into a penalty,

(7.5)    $L(x, z, u) = \|Hx - y\|_2^2 + c \cdot \rho(z) + \lambda \|z - x + u\|_2^2 - \lambda \|u\|_2^2$,

where u is the scaled dual variable and λ is an (arbitrary) penalty weight (see more in [295]). The third and final step applies ADMM [27] for the minimization of L(x, z, u) with respect to x and z while updating u. These are obtained by alternating between the treatment of each variable while fixing the others:

(7.6)    $x \leftarrow \arg\min_{x} \left\{ \|Hx - y\|_2^2 + \lambda \|z - x + u\|_2^2 \right\} = \left( H^T H + \lambda I \right)^{-1} \left( H^T y + z - u \right)$,
(7.7)    $z \leftarrow \arg\min_{z} \left\{ c \cdot \rho(z) + \lambda \|z - x + u\|_2^2 \right\}$,
(7.8)    $u \leftarrow u + (x - z)$.

In the above, the first update equation amounts to a simple least-squares, which does not involve ρ(x). The true drama takes place in the second update formula – observe its close resemblance to Equation (3.3), which formulates an image denoising task. Indeed, instead of choosing/guessing/learning ρ(x), we can apply our favorite denoiser, ẑ = D(x − u, σ0), where σ0 should be inversely proportional to λ/c. This way, PnP offers an appealing iterative algorithm that repeatedly applies a denoiser in order to handle any underlying inverse problem, just as promised. While the original PnP paper did not dive into the issue of convergence of the above ADMM algorithm, nor posed conditions on the denoiser to support such guarantees, later work offers such a theoretical discussion – we refer the interested readers to [42, 309, 269, 155].
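A minimal NumPy sketch of such a PnP-ADMM loop is given below. It follows the spirit of (7.6)-(7.8), but uses the common scaled-ADMM sign convention (bookkeeping of the dual variable u differs between papers), an explicit solve for the least-squares step, and an assumed mapping from λ and c to the denoiser noise level σ0.

```python
import numpy as np

def pnp_admm(y, H, denoiser, lam=0.5, c=1.0, n_iters=50):
    """Plug-and-Play ADMM in the spirit of (7.6)-(7.8).
    denoiser: any black-box AWGN remover, called as denoiser(v, sigma)
    lam, c:   penalty weight and prior scaling; sigma0 = sqrt(c / (2 * lam)) is one
              assumed choice, inversely related to lam / c as stated in the text."""
    N = H.shape[1]
    x = H.T @ y                           # crude initialization
    z, u = x.copy(), np.zeros(N)
    A = H.T @ H + lam * np.eye(N)         # fixed system matrix of the least-squares step
    sigma0 = np.sqrt(c / (2.0 * lam))

    for _ in range(n_iters):
        x = np.linalg.solve(A, H.T @ y + lam * (z - u))  # data-fidelity step (cf. (7.6))
        z = denoiser(x + u, sigma0)                      # prior step: plug in the denoiser
        u = u + (x - z)                                  # dual update (cf. (7.8))
    return x
```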
7.2. Regularization by Denoising (RED). An alternative angle towards the relationship between ρ(x) and image denoising is presented in [231]. The core idea is quite simple, using the following explicit formula for ρ(x) that relies on a denoiser:

\rho(x) = x^T \left[ x - D(x, \sigma_0) \right].   (7.9)

The intuition behind this expression can be uncovered by considering a linearized form of the denoising process, D(x, σ0) = S(x)x, where S(x) is an image-dependent matrix that represents the smoothing applied by the noise removal process. This way, the chosen energy function becomes ρ(x) = x^T [I − S(x)] x, which is a Laplacian smoothness prior of the kind described in Section 3, although being image-adaptive (and thus far more effective). The work in [231] shows that if the denoiser D(x, σ0) is differentiable, passive and of symmetric Jacobian, the chosen energy function in Equation (7.9) is guaranteed to be convex. If, in addition, the denoiser satisfies a local homogeneity property^8, then the following relationship holds:

\nabla_x \rho(x) = 2 \left[ x - D(x, \sigma_0) \right].   (7.10)

^8 See [231] for the exact definitions of these ingredients and for the proof of their implications.
This relationship is a centerpiece in the construction of several RED algorithms. Plugging the chosen ρ(x) from Equation (7.9) into Equation (7.3) implies that the gradient of this functional is easily accessible, requiring a single activation of the chosen denoiser. Critically, this gradient does not require the differentiation of D(x, σ0), which would have required far more computational power and memory consumption. As a consequence, various gradient-based optimization strategies can be applied for computing x̂_MAP, and all are guaranteed to converge to the global minimizer of the MAP penalty. Again, we arrive at iterative algorithms that apply simple linear operations and a denoiser in each step, aiming to solve general linear inverse problems. An intriguing question with respect to the above is the identity of the denoiser to use within RED. Should it be an MMSE denoiser? Should it be designed to remove AWGN? Would these choices lead to the required properties mentioned above (differentiability, symmetry, passivity, homogeneity)? What should σ0 be? Partial answers to these questions are given by the next discussion on the score function.
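As a concrete illustration, the following is a minimal sketch of RED by plain gradient descent on the MAP objective of Equation (7.3), using the gradient identity of Equation (7.10). The `denoise` callable, step size, and weights are illustrative assumptions, not the tuned choices of [231].

```python
import numpy as np

def red_gradient_descent(y, H, denoise, sigma0, c=1.0, mu=0.1, n_iters=200):
    """RED sketch: minimize ||Hx - y||_2^2 + c * rho(x), where the prior
    gradient is 2 * (x - D(x, sigma0)) per Eq. (7.10) -- one denoiser call
    per iteration, and no backpropagation through the denoiser."""
    x = H.T @ y                                   # crude initialization
    for _ in range(n_iters):
        grad_fidelity = 2.0 * H.T @ (H @ x - y)   # gradient of the data term
        grad_prior = 2.0 * (x - denoise(x, sigma0))
        x = x - mu * (grad_fidelity + c * grad_prior)
    return x
```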
7.3. The Score Function and its Relevance to Inverse Problems. Embarking from Equations (7.2) and (7.3), we now present a very different approach towards getting to the same RED formulation, regularizing inverse problems via a denoiser. Assume that our goal is to find x̂_MAP by Steepest Descent (SD), and thus our iterative formula should be

\hat{x}_{k+1} = \hat{x}_k - \mu \left[ H^T (H\hat{x}_k - y) - c \cdot \nabla_x \log p(x)\big|_{\hat{x}_k} \right].   (7.11)

The term ∇x log p(x) is known in the statistical literature as the score function, being a flow-field that describes the optimal ascent direction over the log of the prior. An old mathematical result, commonly attributed to Miyasawa [200], Stein [265], or Tweedie [84], and re-exposed in [138], proves that

\nabla_y \log p(y) = \frac{D(y, \sigma_0) - y}{\sigma_0^2},   (7.12)

where y = x + v is a noisy version of x with v ∼ N(0, σ0² I), and D(y, σ0) should be the optimal Minimum Mean Squared Error (MMSE) denoiser, E(x|y). A proof of this result is brought in Appendix D. While it is impossible to obtain the MMSE denoiser (as p(x) is unknown), modern deep learning-based denoisers perform very well (see Figure 4.1), and therefore constitute a good approximation for it. And so, while Equation (7.11) expects to use the score function that refers to p(x), a denoiser can provide an approximation of it that considers a slightly blurry probability density function^9 p(y) = p(x) ⊗ N(0, σ0² I). When σ0 is small enough^10, this approximation becomes very effective and the resulting algorithm admits the following update rule:

\hat{x}_{k+1} = \hat{x}_k - \mu \left[ H^T (H\hat{x}_k - y) + c \left( \hat{x}_k - D(\hat{x}_k, \sigma_0) \right) \right],   (7.13)

which is exactly the SD version of RED [231].

^9 See Appendix D for a justification of this claim.
^10 RED [231] suggests to use σ0 ≈ 3–5 for images with 256 × 256 gray-values.
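Equation (7.12) says that a score estimate costs a single denoiser evaluation. A minimal sketch of this approximation follows, with a hypothetical `denoise(x, sigma)` standing in for the (approximately) MMSE denoiser; substituting this estimate into the steepest-descent iteration of Equation (7.11), with the 1/σ0² factor absorbed into c, recovers the RED update of Equation (7.13).

```python
def score_from_denoiser(x, denoise, sigma0):
    """Miyasawa/Tweedie relation (Eq. 7.12): the score of the sigma0-blurred
    prior, grad log p(x), expressed through a near-MMSE Gaussian denoiser."""
    return (denoise(x, sigma0) - x) / sigma0 ** 2
```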
7.4. Summary: Denoisers for Solving Inverse Problems. Figures 7.1 and 7.2 present illustrative results of PnP [295], RED [231], and NCSR [74] for deblurring and single-image super-resolution. Note that while NCSR is specifically tailored to handle these two applications, PnP and RED are unaware of the underlying task, and use a given denoiser. The tests presented employ both a simple median filter and the TNRD denoiser [50]. Surprisingly, even a plain denoiser such as the median filter can provide some recovery effect. More details on these experiments and more results can be found in [231]. PnP and RED have drawn much interest in our community in the past several years. Follow-up work has been considering a theoretical analysis of the two methods [42, 278, 225, 94, 309, 269], deployment of the proposed algorithms in various applications [263, 28, 139, 49], creation of new variants of these two methods [283, 279, 268, 280, 267, 123, 56], and more. An appealing outlet of this work returns to the unfolding idea discussed in Section 5: PnP/RED can be used to define well-motivated architectures for solving general inverse problems, by unfolding the proposed algorithms, and then training the repeated denoiser to best serve a series of inverse problems jointly. This way, by plugging in the degradation operator H, a single network can treat a variety of tasks in image processing, built around a core learned denoising engine [194, 229, 73, 192, 333].
[Figure 7.1: Visual comparison of deblurring results by PnP and RED; NCSR [50] is brought as a reference to compare with. Panels: (a) Ground Truth, (b) Input 20.83dB, (c) RED (Median) 25.87dB, (d) NCSR 28.39dB, (e) PnP (TNRD) 28.43dB, (f) RED (TNRD) 28.82dB.]

8. Discovery 2: Image Synthesis via Image Denoisers. The deep learning revolution has enabled several capabilities that were previously thought to be practically impossible. Among the most intriguing such capabilities is image synthesis – the ability to generate a variety of natural-looking images, without conditioning on any kind of input or initialization. More formally, the goal of image synthesis is to obtain a random generator whose outputs follow the prior distribution of images x ∼ p(x).
Succeeding in this task would testify that we have seized the true distribution of images, and this may aid in solving a variety of imaging tasks. A common theme in the definition of such image generators is the need to design a learned machine GΘ(z), which admits a simply distributed input vector z (e.g., z ∼ N(0, I)) and converts it to a valid sample from p(x). GΘ(z) is a neural network parameterized by Θ, and various techniques were conceived in the past decade for learning Θ for best fitting the synthesized results with the destination PDF. In this context, the main tool of interest, which popularized image synthesis, is called GAN – Generative Adversarial Network [107]. While alternatives to GANs do exist, such as Variational Auto-Encoders (VAE) [151], Normalizing Flow (NF) techniques [228, 150], Autoregressive models [293], and energy-based methods [118, 79], GANs were typically at the lead in image generation. Since their introduction and until recently, GANs have undergone various improvements [222, 11, 112, 327], and achieved stellar performance [29, 141, 249].

[Figure 7.2: Visual comparison of super-resolution (3:1) results by PnP and RED; NCSR [50] is brought as a reference to compare with. Panels: (a) Ground Truth, (b) Bicubic 20.68dB, (c) RED (Median) 24.44dB, (d) NCSR 26.79dB, (e) PnP (TNRD) 26.61dB, (f) RED (TNRD) 27.39dB.]
However, this changed dramatically with the arrival of diffusion models [257, 260, 120]. GANs, and the other generative models mentioned above, are detached from the topic of image denoising. In contrast, diffusion models heavily rely on the score function and thus on image denoisers for addressing the task of image synthesis. This recent line of work that started to gain traction, aptly named score-based generative models [260, 261] or denoising diffusion probabilistic models [257, 120], utilizes deep learning-based denoisers to approximate the score function, which is then used in an iterative algorithm to obtain images x that are fair samples from the PDF p(x). The iterative algorithms used for generation in this context are largely based on Langevin dynamics [230, 19], a Markov Chain Monte Carlo (MCMC) method with the following transition rule:

x_{t+1} = x_t + \alpha \nabla_{x_t} \log p(x_t) + \sqrt{2\alpha}\, z_t,   (8.1)

where z_t ∼ N(0, I), and α is an appropriate small constant. Initialized randomly, after a sufficiently large number of iterations, and under some mild conditions on p(x), this process converges to a sampling from the distribution p(x) whose score function is used [230]. Intuitively, the algorithm follows the direction of the gradient of the log-probability, climbing from one image to a more probable one. This is a gradient ascent process, and the noise is added in each iteration to provide stochasticity, which effectively leads to sampling from p(x) rather than converging to a local maximum.
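A single transition of Equation (8.1) is one noisy gradient-ascent step on log p(x); a minimal sketch, where the score callable and the step size α are assumptions supplied by the caller:

```python
import numpy as np

def langevin_step(x, score_fn, alpha, rng=None):
    """One Langevin transition (Eq. 8.1): ascend the log-probability and add
    Gaussian noise so the chain samples p(x) instead of seeking a mode."""
    rng = rng or np.random.default_rng()
    z = rng.standard_normal(x.shape)
    return x + alpha * score_fn(x) + np.sqrt(2.0 * alpha) * z
```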
While it is tempting to use the true data distribution's score function in Langevin dynamics, a few problems prevent such a use [260]. One of the main issues lies with the well-known cardinal manifold assumption [239], which relies on the observation that natural images reside on a low-dimensional manifold in their embedding space. Therefore, for a random initialization of x0, it holds with probability 1 that p(x0) = 0, rendering the score function undefined at best, and without an ability to drift towards the image manifold in subsequent iterations. A possible solution is to approximate p(x) by its slightly blurred counterpart p(y), where y = x + v, v ∼ N(0, σ² I), with a very small σ [296]. This resolves the aforementioned problem, as the Gaussian noise distribution has infinite tails. However, in practice, such a Langevin sampling algorithm requires many thousands of iterations to converge [155], hindering its practical applicability. The authors of [260] suggest the Annealed Langevin Dynamics (ALD) algorithm^11, which considers a sequence of Gaussian noisy image distributions p0(y), p1(y), ..., pL−1(y), pL(y) with standard deviations σ0 > σ1 > · · · > σL−1 > σL. Applying a few iterations of Langevin dynamics for each of the distributions, starting with a very large σ0 and ending with a very small σL, enables a faster convergence. Each of these steps is applied using a denoiser that estimates the score function, and the output of each such process is used to initialize the next. This implies that the synthesis creates a chain of noisy images with diminishing levels of noise, starting with pure canonical Gaussian noise and gradually carving image content out of it. Intuitively, this translates to drawing from a wide distribution and then gradually narrowing it, leading to faster sampling and better performance in image generation.

^11 A very similar algorithm has been proposed in parallel by [120]. Preceding these two works is the one reported in [257], which proposed a similar process while relying on a different rationale borrowed from statistical physics.
Algorithm 8.1 presents this image sampler: the outer loop sweeps through the L + 1 values of σ, while the inner loop applies T Langevin steps for each. The score function ∇x log pi(x), which stands for the σi-blurred PDF of x, is approximated by

\nabla_x \log p_i(x) = \frac{D(x, \sigma_i) - x}{\sigma_i^2}.   (8.2)

Observe that the step size α is modified throughout this process, chosen to be proportional to σi². This aligns with the fact that larger σ values imply a more regular and smooth PDF, which is easier to sample from.^12 Figure 8.1 presents several examples of temporal steps in the ALD process that starts with pure Gaussian noise and ends with a high-quality synthesized image.

^12 A different explanation for this choice of the step size is given in [260], motivated by a desire to better balance the norms of the score versus the additive noise in the Langevin update formula.

Algorithm 8.1: The Annealed Langevin Dynamics (ALD) algorithm
  Input: {σ_i}_{i=0}^{L}, ϵ, T
  Initialize x_0 ∼ N(0, I)
  for i ← 0 to L do
      α_i ← ϵ · σ_i² / σ_L²
      for t ← 1 to T do
          Draw z_t ∼ N(0, I)
          x_t ← x_{t−1} + α_i [D(x_{t−1}, σ_i) − x_{t−1}] / σ_i² + √(2 α_i) z_t
      end
      x_0 ← x_T
  end
  Output: x_0
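A direct transcription of Algorithm 8.1 into code might look as follows; this is only a sketch, in which the denoiser, the noise schedule `sigmas`, and the values of ϵ and T are assumptions supplied by the user.

```python
import numpy as np

def annealed_langevin_dynamics(denoise, sigmas, shape, eps=2e-5, T=100, rng=None):
    """Algorithm 8.1 sketch: anneal through sigma_0 > ... > sigma_L, running T
    Langevin steps per level with the denoiser-based score of Eq. (8.2)."""
    rng = rng or np.random.default_rng()
    x = rng.standard_normal(shape)                      # x_0 ~ N(0, I)
    for sigma_i in sigmas:                              # outer loop over noise levels
        alpha_i = eps * sigma_i ** 2 / sigmas[-1] ** 2  # step size proportional to sigma_i^2
        for _ in range(T):                              # inner Langevin loop
            z = rng.standard_normal(shape)
            score = (denoise(x, sigma_i) - x) / sigma_i ** 2
            x = x + alpha_i * score + np.sqrt(2.0 * alpha_i) * z
    return x
```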
The ALD algorithm sparked a wave of related works [261, 120, 262, 208, 287, 68, 122, 143, 121] that continually improved the performance of these generative diffusion models, eventually surpassing that of GANs [68]. We show some of their results in Figure 8.2. Nevertheless, these iterative algorithms are still considerably slower than GANs, so substantial work has been invested in improving their speed without compromising significantly on generation quality [258, 135, 247], often achieving impressive speedup levels. Diffusion models have since become ubiquitous in many applications [142, 209, 21, 116, 6, 253, 254, 144], prompting researchers to prepare surveys of their impact on the image processing field and beyond [315, 60, 36].

[Figure 8.1: Temporal steps along 3 independent synthesis paths of the Annealed Langevin Dynamics [260] algorithm, using a denoiser [261] trained on LSUN bedroom [319] images.]

[Figure 8.2: Image generation results for CelebA-HQ [172] (left) and ImageNet [66] (right) using score-based denoising diffusion generative models [262, 68].]

9. Discovery 3: High Perceptual Quality Image Recovery. We are now stepping into the last and what we believe to be one of the most exciting topics in the story of image denoisers – solving general linear inverse problems while striving for perfect perceptual quality, and achieving this with the support of an MMSE denoiser. We start with the simplest inverse problem – image denoising itself – and grow from there to more general recovery tasks.
9.1. Revisiting the Image Denoising Problem. We return to the classic image denoising problem, where y = x + v is a given noisy image, x ∼ p(x) is its ideal origin, and v ∼ N(0, σ_y² I) is the AWGN. Our goal is to recover x, but now we change the rules of the game by expecting high perceptual quality results. How could this be achieved? Throughout the classical era of denoising, and well into the modern AI days, denoisers were mostly evaluated using the Mean Squared Error (MSE) measure shown in Equation (2.2) (or tightly related measures such as the Peak Signal-to-Noise Ratio – PSNR). As can be seen in Figure 4.1, MSE has been and still is a commonly used performance measure for denoisers. The MSE metric has several clear benefits: it is zero when the denoiser perfectly recovers the image, it is intuitive to understand, and it produces mathematically elegant results for theoretical analysis, as well as practical considerations such as ease of differentiation for optimization. However, the MSE distortion measure suffers from a critical shortcoming: as discussed in Section 2.1 and in Appendix A, the best possible result in MSE (MMSE), regardless of the denoising method used to approximate it, would rely on a conditional expectation,

\hat{x}_{\mathrm{MMSE}} = \arg\min_{\hat{x}} E\left[ \|x - \hat{x}\|_2^2 \right] = \int x\, p(x|y)\, dx = E(x|y).   (9.1)

In other words, when optimizing for MSE, our main goal is to get as close as possible to the original image in expectation, and this implies an averaging over all possible solutions, weighted by their posterior probability.
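This averaging can be made tangible with a toy 1-D Monte-Carlo estimate of Equation (9.1): draw samples from some prior, weight them by the Gaussian likelihood of an observed y, and average. The bimodal prior below is an arbitrary illustration, not taken from the paper; with strong noise, the posterior mean tends to land between the two modes, a point of low prior probability, echoing the blurriness discussed next.

```python
import numpy as np

rng = np.random.default_rng(0)
sigma_y = 1.0
# Toy bimodal "prior" over a scalar signal and one noisy observation of it.
x_prior = np.concatenate([rng.normal(-1.0, 0.1, 5000), rng.normal(1.0, 0.1, 5000)])
x_true = 1.0
y = x_true + sigma_y * rng.standard_normal()

# MMSE estimate E[x | y]: likelihood-weighted average over prior samples (Eq. 9.1).
w = np.exp(-(y - x_prior) ** 2 / (2.0 * sigma_y ** 2))
x_mmse = float(np.sum(w * x_prior) / np.sum(w))
print(x_mmse)   # often pulled off the prior modes toward an in-between, low-probability value
```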
Thus, depending on the geometry of the image manifold and the severity of the noise, the MMSE solution may tend to be too blurry and of relatively low probability p(x̂_MMSE), falling outside of the desired manifold. We illustrate this phenomenon in a 2-dimensional example in Figure 9.1. Indeed, the fact that MMSE denoising achieves optimal L2 distortion necessarily implies that perceptual quality is compromised. The authors of [22] prove the existence of a "perception-distortion tradeoff": distortion (of any kind!) and perceptual quality are at odds with each other, and optimizing one necessarily deteriorates the other. In this context, perceptual quality is defined as the proximity between the original image distribution p(x), and the denoised image one p(x̂). Figure 9.2 presents the essence of these findings in [22].

[Figure 9.1: A 2-dimensional qualitative demonstration of the disadvantages of MMSE denoising. Given a noisy image, the MMSE denoiser falls outside of the image manifold, whereas a posterior sampler would necessarily sample points that reside on it. This leads to better perceptual quality in the denoising results. Legend: image manifold, MMSE result E(x|y), noisy image, probable samples from the posterior distribution.]
[Figure 9.2: The perception-distortion trade-off [22]: any recovery algorithm necessarily performs on the blue curve or above it. On the perception-distortion bound curve, the top-left point refers to the MMSE estimation, while the right-bottom one (or right to it – see [22]) is obtained by a posterior sampler. A gap of 3dB divides between the two when using the MSE distortion measure. Axes: distortion (horizontal) versus perception (vertical), with the possible and impossible regions, several algorithms (Alg. 1–4), the MMSE point, and the posterior sampler marked.]

With this tension between visual quality and distortion in mind, alternative approaches to MSE were developed over the years, aiming for high perceptual quality denoising [69, 67, 212, 146]. One such technique is to sample from the posterior distribution: given a noisy image y, we aim to develop a denoiser that outputs x̂ ∼ p(x|y), i.e., samples from the posterior distribution of pristine images given the noisy measurement. A successful posterior sampler would achieve perfect perceptual quality, as when marginalizing over y, we get p(x̂) = p(x). It is important to notice that this technique involves a subtle paradigm shift – the denoiser is no longer a deterministic function of the noisy input y, but rather a stochastic one, and this implies a multitude of possible solutions.
In the following, we present two pragmatic approaches for approximating posterior sampling behavior. To traverse the perception-distortion tradeoff, a Wasserstein Generative Adversarial Network (WGAN) conditioned on noisy images can be used [22, 69]. Such a network consists of two main elements: a generator, which takes a noisy image as well as a random vector as input and outputs a denoised image, and a discriminator, whose job is to distinguish between denoised and original images. The discriminator is trained to discriminate between the generator's outputs and original images, while the generator optimizes two loss functions: the MSE with respect to the original image, and the ability to "fool" the discriminator, thus encouraging its output to "look like a real image" in the eyes of the discriminator. These two losses, as proven in [22], are at odds with one another, and tuning their respective weights in the total loss function translates to the traversal of the perception-distortion tradeoff. This idea is further improved upon by [212]: instead of requiring low distortion on individual generator samples, the requirement is made on their mean. This results in a loss function that encourages the generator to act as a sampler from the posterior distribution, therefore attaining near-perfect perceptual quality while remaining faithful to the input image.
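The generator's two-term objective can be sketched as follows. This is only a schematic PyTorch-style training step under assumed generator G, critic D, and weight lambda_adv (all hypothetical names), not the exact losses or architectures of [22, 69, 212].

```python
import torch
import torch.nn.functional as F

def generator_step(G, D, x_clean, y_noisy, lambda_adv, opt_G):
    """One generator update for a conditional WGAN denoiser sketch: an MSE
    (distortion) term plus an adversarial (perception) term. Sweeping
    lambda_adv traverses the perception-distortion tradeoff."""
    z = torch.randn(y_noisy.size(0), 128, device=y_noisy.device)  # random code
    x_hat = G(y_noisy, z)                    # stochastic denoised estimate
    loss_mse = F.mse_loss(x_hat, x_clean)    # stay close to the original
    loss_adv = -D(x_hat).mean()              # raise the critic's score ("fool" D)
    loss = loss_mse + lambda_adv * loss_adv
    opt_G.zero_grad()
    loss.backward()
    opt_G.step()
    return float(loss.detach())
```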
An alternative posterior sampling approach, which reconnects with MMSE denoisers, is using the annealed Langevin dynamics algorithm [260] presented in the previous section. Recall that ALD uses the score function ∇_x̃ log p_i(x̃) to sample from a prior distribution p_i(x̃)^13. In [146], the regular ALD algorithm is extended to treat image denoising by analytically conditioning the score function on a noisy input y – effectively sampling from the posterior distribution p_i(x̃|y). The algorithm is initialized with the noisy input y, which is then gradually denoised using the conditional score function, obtained using the Bayes rule,

\nabla_{\tilde{x}} \log p_i(\tilde{x}|y) = \nabla_{\tilde{x}} \log \frac{p(y|\tilde{x})\, p_i(\tilde{x})}{p(y)} = \nabla_{\tilde{x}} \log p_i(\tilde{x}) + \nabla_{\tilde{x}} \log p(y|\tilde{x}).   (9.2)

The term ∇_x̃ log p_i(x̃) is the regular score function, which can be approximated by an MSE-trained denoiser. As for the other term, ∇_x̃ log p(y|x̃), observe that this likelihood can be rewritten by exploiting two facts: (i) y = x + v is the noisy image (v ∼ N(0, σ_y² I)), and (ii) x̃ = x + z is the annealed solution (z ∼ N(0, σ_i² I)), and thus

p(y|\tilde{x}) = p(y - \tilde{x}|\tilde{x}) = p(x + v - x - z|\tilde{x}) = p(v - z|\tilde{x}).   (9.3)

^13 In these notations, p_i stands for a σ_i-blurred PDF version of the original prior p(x), and x̃ is a temporary synthesized image that contains annealing Gaussian noise with variance σ_i².

If we assume statistical independence between the measurements' noise and the annealing one, v − z becomes a plain Gaussian vector. However, its conditioning on the knowledge of x̃ leads to a dead-end, since this image contains z in it. The alternative, as developed in [146], is to construct the annealing noise such that v − z is statistically independent of both z and x̃. This can be obtained by breaking the measurements' noise v into small fragments, assuming that their partial accumulations constitute the annealing noise in each of the stages. Thus, v − z is a white Gaussian noise that has no correlation with the noise z, nor with the target image x. Put in other words, this likelihood expression becomes simple when considering y to be an even more noisy version of x̃. This in turn makes p(y|x̃) a simple white Gaussian distribution of the form N(0, (σ_y² − σ_i²) I). Therefore,
\nabla_{\tilde{x}} \log p_i(\tilde{x}|y) = \nabla_{\tilde{x}} \log p_i(\tilde{x}) + \frac{y - \tilde{x}}{\sigma_y^2 - \sigma_i^2}.   (9.4)

Plugging this modification into ALD turns Algorithm 8.1 into an image denoiser. Beyond its ability to attain near-perfect perceptual quality, this approach has the advantage of not requiring any special model training. Crucially, this finding shows that simple MSE denoiser training is more powerful than originally thought – not only can it approximate MMSE denoiser behavior, but it can also perform denoising by posterior sampling under the Langevin dynamics scheme. Figure 9.3 presents a denoising result by the above-described method. Several observations are in order from this figure: (i) the generated results are indeed of very high perceptual quality; (ii) running ALD several times results in different solutions, all valid and yet diverse – see the STD image that exposes the uncertainty within the task being solved; (iii) denoising y directly by D(y, σy) leads to better MMSE but poorer perceptual quality; (iv) the figure also shows the evolving solution within the ALD steps, and as can be seen, the noise in y is effectively peeled layer by layer.
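A sketch of the resulting posterior-sampling denoiser is given below: the ALD loop of Algorithm 8.1 is started from the noisy image and driven by the conditional score of Equation (9.4). The schedule, step sizes, and initialization here are simplified assumptions; the actual construction in [146] fragments the measurement noise so that the independence argument above holds exactly.

```python
import numpy as np

def ald_posterior_denoiser(y, denoise, sigmas, sigma_y, eps=2e-5, T=5, rng=None):
    """Posterior-sampling denoiser sketch in the spirit of [146]: annealed
    Langevin dynamics with the conditional score of Eq. (9.4).
    The noise levels in `sigmas` are assumed to satisfy sigma_i < sigma_y."""
    rng = rng or np.random.default_rng()
    x = np.array(y, dtype=float)                    # initialize with the noisy image
    for sigma_i in sigmas:                          # annealing: sigma_0 > ... > sigma_L
        alpha_i = eps * sigma_i ** 2 / sigmas[-1] ** 2
        for _ in range(T):
            prior_score = (denoise(x, sigma_i) - x) / sigma_i ** 2        # Eq. (8.2)
            likelihood_score = (y - x) / (sigma_y ** 2 - sigma_i ** 2)    # Eq. (9.4)
            z = rng.standard_normal(x.shape)
            x = x + alpha_i * (prior_score + likelihood_score) + np.sqrt(2.0 * alpha_i) * z
    return x
```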
9.2. High Perceptual Quality Solution to Inverse Problems. We now expand our discussion by returning to general linear inverse problems of the form y = Hx + v, where H ∈ R^{M×N} is a known matrix, v ∈ R^M is AWGN, and y ∈ R^M is the given measurement vector. Our goal is to propose novel solutions to these problems while striving for high perceptual quality. The above discussion on the perception-distortion tradeoff is not limited to image denoising, but also applies to more general inverse problems [22]. There too, potential solvers need to trade off distortion metrics (e.g., MSE) versus perception measures (e.g., the distribution shift between real images and the obtained solutions). Indeed, MSE in these cases may become far more challenging as an optimization goal due to the ill-posedness of the inverse problems. Consider, as an example, an inpainting problem in which the bottom half of the image is given and the goal is to recover the top part. The MMSE solution in this case necessarily averages all possible completions, resulting in a very blurry outcome. More broadly, optimizing for MSE in this context would result in a clear regression-to-the-mean, which is significantly more pronounced in under-determined inverse problems than in image denoising.
Top row (left to right): an original image, its noisy version (σ_y = 100), the MMSE-optimized denoiser's result, and the STD of the sampled solutions. Middle row: 6 sampled ALD denoising solutions. Bottom row: 6 intermediate steps within the ALD algorithm.

Successful inverse problem solvers, such as the Plug-and-Play Prior [295] and RED [231] algorithms mentioned in Section 7, aim for a Maximum-a-Posteriori (MAP) solution to the inverse problem at hand, rather than the MMSE one. While these methods achieve impressive results, the MAP solution can be improved upon in terms of perceptual quality without compromising distortion performance [22]. This is due to the deterministic nature of MAP solvers – a solver that aims for the best perceptual quality should necessarily be stochastic in order to account for the multiple possible solutions to the given problem [211]. As in the image denoising case, stochastically sampling from the posterior distribution achieves perfect perceptual quality in general inverse problems. Following the road paved in the previous section, an appealing way to approximate such sampling would be to follow Equation (9.2), using a generative diffusion model and augmenting the score by an analytical term that conditions on the observed measurement y. This idea was initially suggested by [262, 138] for handling noiseless linear inverse problems, and later extended to the more general case in [145, 142, 54, 53, 195]. Below we describe the essence of the approach proposed in SNIPS [145]. Visual examples of this method in action are shown in Figure 9.4 for several inverse problems.
Our goal is to obtain a closed-form expression for the term ∇_{x̃} log p(y|x̃) in Equation (9.2). We use the following two relationships: (i) y = Hx + v is the noisy measurement (v ∼ N(0, σ_y² I)), and (ii) x̃ = x + z is the annealed solution (z ∼ N(0, σ_i² I)).

Figure 9.4: Comparison of an MMSE result with samples from the posterior distribution using SNIPS [145]. Note the subtle improvements in perceptual quality from MMSE to the posterior samples, especially in the finer details such as the hair. The comparison is conducted on 64×64 pixel images from CelebA [172], on the problems of compressive sensing, inpainting, and 4× super-resolution (top-to-bottom).

The likelihood function can be simplified to

(9.5)   p(y\,|\,\tilde{x}) = p(y - H\tilde{x} \,|\, \tilde{x})
                           = p(Hx + v - Hx - Hz \,|\, \tilde{x})
                           = p(v - Hz \,|\, x + z).

As in the denoising case in Equation (9.3), statistical independence between v and z cannot be assumed due to the dependency on x̃. The alternative, as shown by SNIPS [145], relies again on a delicate connection between these two random entities, obtained by a decoupling of the measurements' equation via a Singular Value Decomposition (SVD) of the degradation matrix H = UΣV^T:

(9.6)   p(y\,|\,\tilde{x}) = p(v - Hz \,|\, x + z)
                           = p(U^T v - \Sigma V^T z \,|\, V^T x + V^T z)
                           = p(\hat{v} - \Sigma \hat{z} \,|\, \hat{x} + \hat{z})
                           = \prod_k p(\hat{v}_k - s_k \hat{z}_k \,|\, \hat{x}_k + \hat{z}_k).

The second row in the above equation is obtained by transforming the term v − Hz by the matrix U^T, and similarly transforming x + z via a multiplication with V^T.
As these are unitary matrices, the transformations applied do not change the statistics. Considering the transformed vectors U^T y = ŷ, V^T x = x̂, V^T z = ẑ and U^T v = v̂ leads to the third row in the above equation. This joint probability can be decoupled into a separable Gaussian distribution if we choose each entry v̂_k − s_k ẑ_k to be independent of ẑ_k, just as practiced in the denoising case, this time while taking the singular value s_k into account. This algorithm, fully described in [145], demonstrates considerable success in a number of inverse problems (see Figure 9.4), and already has several follow-up works [142, 54, 53, 195].
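As a concrete illustration of this decoupling, the following sketch applies the SVD of H to a toy measurement model, moving both the measurement and the current iterate into the spectral domain in which the likelihood of Equation (9.6) factorizes coordinate by coordinate. The operator, dimensions, and function names are illustrative assumptions, not the SNIPS code.

import numpy as np

def svd_decouple(H, y, x_tilde):
    """Move a linear inverse problem into the spectral domain of H = U S V^T,
    where the likelihood of Eq. (9.6) factorizes per coordinate."""
    U, s, Vt = np.linalg.svd(H, full_matrices=False)
    y_hat = U.T @ y        # transformed measurement, y_hat = U^T y
    x_hat = Vt @ x_tilde   # transformed iterate,     x_hat = V^T x~
    # coordinate k of y_hat now relates to coordinate k of x_hat only
    # through the single singular value s[k]
    return y_hat, x_hat, s

# toy usage: a 2x "averaging" downsampling operator on an 8-sample signal
H = np.kron(np.eye(4), np.array([[0.5, 0.5]]))
x_true = np.random.randn(8)
y = H @ x_true + 0.05 * np.random.randn(4)
y_hat, x_hat, s = svd_decouple(H, y, x_true + 0.3 * np.random.randn(8))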
We should mention that an alternative to all the above exists, in which one simply adds the corrupted measurements y as an input to the denoising model itself, effectively conditioning the entire generative process on y [245, 243, 303]. This approach requires designing and training a separate denoiser for each inverse problem, as the denoiser needs to implicitly learn the connection between the images and their corresponding measurements for the specific problem at hand. Interestingly, this approach requires pairs of images, x and y, in its training, but does not utilize knowledge of the degradation model itself (e.g., the matrix H). This property allows this alternative approach to generalize beyond clearly formulated inverse problems, and to handle tasks such as stylization, JPEG-deblocking, and more.

A particularly interesting case is when y is a textual description of the image contents. By conditioning the denoiser model on such text, the generative diffusion process allows users to perform text-to-image generation [224, 244, 232, 14]. This unprecedented capability became instantly popular, as users were able to synthesize high-quality images by simply describing the desired result in natural language, as we demonstrate in Figure 9.5. These models have become a centerpiece in an ongoing and quickly advancing research area, as they have been adapted for image editing [147, 202], object recontextualization [241, 95], 3D object generation [220], and more [119, 129, 213, 346].

Figure 9.5: Examples of synthesized images using DALL-E 2 [224], a text-to-image generative denoising diffusion model. The input conditioning text is written below each image: "a DSLR photo of a kangaroo walking in New York City", "an oil painting by Matisse of a humanoid robot playing chess", "a stern-looking owl dressed as a librarian, digital art", "3D render of a small green balloon dog in a light pink room", and "a photo of a wild boar in a street, wearing headphones".

10. Conclusion. Removal of white additive Gaussian noise from an image is a fascinating topic, both because it poses a very interesting engineering challenge, and even more so, because it creates new opportunities in image processing and machine learning. In this paper we highlight these two branches of activity. The first half of the paper concentrates on the design of such denoisers, with a particular interest in the impact of the AI revolution on this field.
The second half of the paper features the usefulness of such image denoisers for handling other tasks, such as image synthesis and solving inverse problems while targeting high perceptual quality solutions. Figure 10.1 encapsulates this part of the story in a block diagram. Much remains to be done in this domain, in better understanding how to design appropriate MMSE denoisers, and in harnessing them to other tasks beyond the ones described in this paper, such as compression, segmentation, and more. More broadly, there are many opportunities and challenges in better understanding, designing, and proposing creative usage of image denoisers.

Figure 10.1: A summary of the main message of this paper: an MMSE denoiser is key in synthesizing images and solving inverse problems. [Block diagram: given an MMSE denoiser D(y), it can be used for denoising while targeting high perceptual quality, synthesizing natural-looking images, solving any inverse problem (PnP/RED), and solving any inverse problem with high perceptual quality – all achieved by simply applying D(y) iteratively.]

Interestingly, there is a great unexplored proximity between the PnP and RED algorithms [295, 231] and the more recent, diffusion-based techniques for getting high perceptual quality solutions to inverse problems [145, 53, 195].

Appendix A. Derivation of the MMSE Estimation.
Consider an ideal image x drawn from the probability density function p(x), and assume that we are given a measurement of it, y, related to it via the conditional probability p(y|x). Our goal is to find the estimator x̂ = f(y) that minimizes the expected mean-squared-error,

(A.1)   MSE = E\left[\|x - \hat{x}\|_2^2 \,\big|\, y\right] = E\left[\|x - f(y)\|_2^2 \,\big|\, y\right] = \int \|x - f(y)\|_2^2\, p(x|y)\, dx.

Observe that this expectation is taken with respect to the unknown image x, while considering y as known. In order to minimize the above measure, we take the derivative of this expression with respect to f(y) and null it,

(A.2)   \frac{d}{df(y)} \int \|x - f(y)\|_2^2\, p(x|y)\, dx = -2 \int (x - f(y))\, p(x|y)\, dx = 0.

This results in

(A.3)   \int x\, p(x|y)\, dx = \int f(y)\, p(x|y)\, dx = f(y) \int p(x|y)\, dx = f(y).

The last step on the right-hand side relies on the fact that \int p(x|y)\, dx = 1. Thus, we get the familiar closed-form solution for the MMSE estimation [217],

(A.4)   f_{MMSE}(y) = \int x\, p(x|y)\, dx = E(x|y).

As a final step, since the posterior is not directly accessible, we may use the Bayes rule [137] and write

(A.5)   f_{MMSE}(y) = \int x\, \frac{p(y|x)\, p(x)}{p(y)}\, dx = \int x\, \frac{p(y|x)\, p(x)}{\int p(y|x)\, p(x)\, dx}\, dx,

where this formula uses the ingredients we started with – p(y|x) and p(x).
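The identity f_MMSE(y) = E(x|y) is easy to verify numerically on a toy discrete prior. The short sketch below, an illustrative aside rather than part of the derivation, computes the posterior via the Bayes rule as in (A.5) and checks that the posterior mean attains a smaller expected squared error than nearby estimators.

import numpy as np

# toy discrete prior over scalar "images" x and a Gaussian measurement y = x + noise
x_values = np.array([-1.0, 0.0, 2.0])
p_x = np.array([0.2, 0.5, 0.3])
sigma = 0.7
y = 0.4  # one observed measurement

# posterior p(x|y) via the Bayes rule, as in Eq. (A.5)
likelihood = np.exp(-(y - x_values) ** 2 / (2 * sigma ** 2))
posterior = likelihood * p_x
posterior /= posterior.sum()

f_mmse = (x_values * posterior).sum()   # posterior mean, Eq. (A.4)

def expected_mse(estimate):
    # E[(x - estimate)^2 | y] under the computed posterior
    return ((x_values - estimate) ** 2 * posterior).sum()

# the posterior mean attains the smallest expected squared error
for est in [f_mmse, f_mmse + 0.2, f_mmse - 0.5]:
    print(f"estimate {est:+.3f}  ->  E[(x - est)^2 | y] = {expected_mse(est):.4f}")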
Appendix B. A Closer Look at the Evolution of Priors. Using the Gibbs distribution form, p(x) = c · exp{−ρ(x)}, we shift our focus from the probability density function p(x) to its corresponding energy function ρ(x). Table B.1 brings a list of possible analytical expressions for ρ(x) as they evolved in the image processing literature. Below we describe each of these options briefly, adopting the context of solving a general linear inverse problem of the form y = Hx + v with the assumptions that v ∼ N(0, σ²I) and H is a full-rank known matrix of size m × N (m < N). The MAP estimation in this case is given by

(B.1)   \hat{x}_{MAP} = \arg\min_x \left\{ \frac{\|Hx - y\|_2^2}{2\sigma^2} - \log\big(p(x)\big) \right\} = \arg\min_x \left\{ \|Hx - y\|_2^2 + c \cdot \rho(x) \right\}.

Notice that 2σ² was absorbed into the constant: c = 2σ². Armed with this expression, let us consider each of the choices in Table B.1 and explore its implications. Before diving into these options, observe that without the regularization provided by ρ(x), the above optimization becomes an ill-posed Least-Squares problem with infinitely many possible solutions. Thus, the added prior serves as an important regularization, pushing towards a single (and hopefully meaningful) solution.

Energy regularization: If H^T H cannot be inverted, the most obvious algebraic remedy would be to add a constant to its diagonal, resulting in the regularized solution x̂_MAP = (H^T H + cI)^{−1} H^T y. This is exactly the solution offered by the choice ρ(x) = ∥x∥²₂, and when the constant c is taken to 0, this leads to the familiar pseudo-inverse solution x̂_MAP = H†y. While mathematically appealing, this option does not yield satisfactory visual results [251].
Spatial Smoothness: It is well known that adjacent pixels in natural images are likely to have smoothly varying values. Thus, penalizing deviations from such a smoothness property seems well justified [15, 154]. Plugging the option ρ(x) = ∥Lx∥²₂ into the MAP expression leads to the closed-form solution x̂_MAP = (H^T H + cL^T L)^{−1} H^T y, which is very closely related to the well-known Wiener filter [304].

Optimally Learned Transform: Given a large enough dataset of images, we could fit a multivariate Gaussian N(0, R) to them by adjusting the second moment. The assumed zero mean is easily obtained by subtracting the mean image from the given data. PCA [215] or the Karhunen-Loève Transform (KLT) [174, 140, 37, 136] offer a clear computational path towards this moment matrix R as the auto-correlation matrix of the available data. When the expression ρ(x) = x^T R^{−1} x is plugged into the MAP estimation, we come back to the Wiener filter, this time as x̂_MAP = (H^T H + cR^{−1})^{−1} H^T y. Note that the same treatment could emerge from the formulation ρ(x) = ∥Tx∥²₂ = x^T R^{−1} x, where T is the corresponding transform that should be applied to x, and clearly T^T T = R^{−1}.

Weighted Smoothness: All the above options suffer from the same difficulty – they produce overly smoothed results. In retrospect, the reason is obvious: non-smooth behavior is heavily penalized and thus not encouraged, which results in smeared edges. A way to overcome this difficulty is to produce a weight map that describes the local smoothness tendency – regions in which smoothness is believed to be correct should be assigned a high weight, while a low weight should be given to regions suspected to contain texture or edges [246, 52]. By constructing a diagonal matrix W that contains the above weights on its main diagonal, and using the choice ρ(x) = ∥Lx∥²_W, the MAP estimation becomes x̂_MAP = (H^T H + cL^T WL)^{−1} H^T y. This is a spatially adaptive solution, dependent on the local weights.
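All of the quadratic priors discussed so far (energy regularization, spatial smoothness, a learned transform, and weighted smoothness) admit the same closed-form MAP solution x̂_MAP = (H^T H + cL^T WL)^{−1} H^T y. The sketch below solves it directly on a small toy problem; the operators and sizes are illustrative assumptions only.

import numpy as np

def quadratic_map_solver(H, y, L, W, c):
    """Closed-form MAP solution for a quadratic prior rho(x) = ||Lx||^2_W:
    x_hat = (H^T H + c L^T W L)^{-1} H^T y.
    With L = I and W = I this reduces to plain energy regularization."""
    A = H.T @ H + c * L.T @ W @ L
    return np.linalg.solve(A, H.T @ y)

# toy usage: a simple blur-like forward model with a smoothness prior
n = 8
H = np.eye(n) + 0.5 * np.eye(n, k=1)    # forward model
L = np.eye(n) - np.eye(n, k=1)          # finite-difference operator
W = np.eye(n)                           # uniform weights (Wiener-like)
rng = np.random.default_rng(1)
x_true = np.cumsum(rng.normal(size=n))  # a smooth-ish signal
y = H @ x_true + 0.05 * rng.normal(size=n)
x_hat = quadratic_map_solver(H, y, L, W, c=0.1)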
One may consider an iterative approach in which the temporary solution x̂_MAP is leveraged to update the weights, and then x̂_MAP is re-computed. This interesting option leads to the robust statistics alternative discussed next [20].

Before proceeding with the other prior options, we would like to draw the readers' attention to the fact that all the above choices correspond to the core assumption that the probability density function p(x) is a multivariate Gaussian. The obtained visual results of these techniques expose the fact that this Gaussianity assumption is not adequate, and indeed, later research in image processing turned to non-Gaussian and heavy-tailed alternative distributions, which we discuss next.

Robust statistics: Here is a simple experiment – take any natural image, apply a Laplacian to it, and gather a histogram of the resulting values. This histogram is likely to look like a heavy-tailed probability density function of a form similar to c · exp(−|x|^α) with α ≪ 2. This is exactly the deviation from Gaussianity referred to above. Thus, the robust statistics alternative [126, 97, 96, 44, 238] suggests replacing the L2-norm of Lx by L1 or, more broadly, by functions of the form 1^T μ{Lx} (e.g., μ(x) = |x|^α). Notice that from here on, a closed-form MAP solution cannot be obtained, and iterative minimization strategies are necessary.
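With such a heavy-tailed prior the MAP energy ∥Hx − y∥²₂ + c · 1^T μ{Lx} must be minimized iteratively. A minimal sketch, assuming a smoothed |·|^α penalty and plain gradient descent, is given below; in practice, half-quadratic splitting or iteratively reweighted least squares is usually preferred for such penalties.

import numpy as np

def robust_map(H, y, L, c=0.1, alpha=1.0, eps=1e-3, step=0.05, iters=500):
    """Gradient descent on ||Hx - y||^2 + c * sum(mu(Lx)) with a smoothed
    robust penalty mu(t) = (t^2 + eps)^(alpha/2), approximating |t|^alpha."""
    x = H.T @ y  # crude initialization
    for _ in range(iters):
        r = H @ x - y
        t = L @ x
        # d/dt (t^2 + eps)^(alpha/2) = alpha * t * (t^2 + eps)^(alpha/2 - 1)
        mu_grad = alpha * t * (t ** 2 + eps) ** (alpha / 2 - 1)
        grad = 2 * H.T @ r + c * L.T @ mu_grad
        x = x - step * grad
    return x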
Adopting a different point of view, robust statistics considers pixels on edges and in textured regions as outliers with respect to the Gaussian distribution, and thus uses robust estimation techniques for their better handling.

Total-Variation (TV): The same motivation as described above led to this brilliant PDE formulation of spatial smoothness, ρ(x) = ∫_{v∈Ω} |∇x(v)| dv, which accumulates the lengths of the spatial gradients instead of their squares [240]. In its discretized form, its treatment is very similar to the robust-statistics option. However, TV has very different roots, providing a geometrically oriented, edge-preserving measure of smoothness – see various extensions of this line of work in [17, 99, 39, 3].

Other PDE-based options: While TV applies an L1-norm on the spatial gradients, more general options can be envisioned, in which the accumulation is spatially adaptive, orientation sensitive, geometrically faithful, and more [216, 38, 255, 302, 111]. Starting with the seminal anisotropic diffusion method by Perona and Malik [216], various such methods of the form ρ(x) = ∫_{v∈Ω} g(∇x(v), ∇²x(v)) dv were proposed and perfected over the years, forming an exciting sub-field of mathematically oriented image processing that relies on the vast knowledge in partial differential equations.

Field-of-Experts (FoE): Let us return to the robust statistics option described above and enrich it by considering a mixture of such distributions, ρ(x) = Σ_k λ_k 1^T μ_k{L_k x}. This implies the need to define a series of functions μ_k and their corresponding weights λ_k. FoE suggests learning these elements from an image dataset, thus better fitting the assumed prior to natural images. While earlier work on FoE [236] suggested a patch-based maximum-likelihood learning approach, later efforts [50] brought deep-learning tools to this fitting.

Wavelet sparsity: The idea of relying on transform coefficients for constructing ρ(x) has already been explored in the context of the KLT.
The emergence of the Wavelet transform in the late 80's brought a new way of thinking about signals and images, offering an elegant and more effective multi-scale representation that relies on non-linear approximation [77, 76, 57, 98, 185, 177, 43, 221, 175, 325, 108]. Wavelets offer a concise description of the data with as few coefficients as possible, thereby giving birth to the central notion of sparsity. This translates well into the proposed prior ρ(x) = ∥Wx∥₁, which promotes few non-zero dominant Wavelet coefficients. As an interesting side note, if we are handling the image denoising problem – i.e., H = I in Equation (B.1) – and the Wavelet transform matrix W is unitary, the estimate x̂_MAP has a closed-form solution, obtained via a soft-shrinkage [77, 76].
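For the unitary-W denoising case just mentioned, the closed-form MAP estimate amounts to transforming the noisy image, soft-thresholding each coefficient, and transforming back. The sketch below uses an orthonormal DCT in place of a wavelet basis purely for self-containedness; the basis and threshold are illustrative assumptions.

import numpy as np
from scipy.fft import dct, idct

def soft_threshold(c, t):
    """Soft-shrinkage: the closed-form minimizer of (c - v)^2 / 2 + t * |v|."""
    return np.sign(c) * np.maximum(np.abs(c) - t, 0.0)

def transform_denoise(y, threshold):
    """Denoise y by soft-thresholding its coefficients in an orthonormal
    transform (a DCT here, standing in for a unitary wavelet transform W)."""
    coeffs = dct(y, norm='ortho')           # W y
    coeffs = soft_threshold(coeffs, threshold)
    return idct(coeffs, norm='ortho')       # W^T (shrunken coefficients)

# toy usage: a piecewise-constant signal with additive Gaussian noise
rng = np.random.default_rng(0)
x = np.concatenate([np.zeros(64), np.ones(64)])
y = x + 0.2 * rng.normal(size=x.size)
x_hat = transform_denoise(y, threshold=0.3)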
Self-similarity: So far we have described two primary forces that promote simplicity in image content – spatial smoothness and representation sparsity. Self-similarity is a third such force that has been recognized as central by a series of contributions, starting with the seminal Non-Local-Means (NLM) algorithm [32], and heavily relied upon by the famous BM3D [61] and other algorithms [181, 274, 187, 46, 297, 203, 273, 248, 167]. Self-similarity stands for the assumption that any given (small enough) patch in an image is likely to find very similar ones within the image support, and thus treating these together is likely to lead to better recovery. More specifically, the expression we bring here as an illustration,

(B.2)   \rho(x) = \sum_k \sum_{j \in \Omega(k)} d\{R_k x, R_j x\},

sweeps through the image support, extracts a patch at location k by the operator R_k x, and finds all its corresponding matches j ∈ Ω(k). Forcing proximity between R_k x and the patches R_j x induces a strong regularization over the unknown image x.

Sparsity methods: While the notion of sparsity has already been exploited by wavelets, later work took this idea and strengthened it by considering redundant and learned representations. Under the assumption that ideal images can be described as linear combinations of atoms from a pre-specified dictionary D, i.e., x = Dα, forcing sparsity on the representation via the term ∥α∥₀ provides an appealing and computationally feasible choice for ρ(x) [31, 88]. Vast work along these lines has been done, considering global dictionaries and later local (patch-based) ones, leading to various very successful recovery algorithms [89, 90, 4, 183, 182, 181, 81, 320, 71, 74, 100, 72, 85].

Low-Rank assumption: The last member to enter the pantheon of image priors relies on a low-rank assumption over groups of similar patches. This idea is closely related to the self-similarity force described above, and in fact builds on top of it. Given a set of closely related patches, instead of forcing proximity between them, one may gather them as columns of a matrix and force a low-rank structure, implying that all these patches are spanned by a few main directions. Several very strong recovery algorithms leveraged this idea in various forms, while exploiting theoretical analysis that ties the low-rank requirement to the nuclear norm [305, 35]. By summing these norms over such groups, ρ(x) = Σ_k ∥X_Ω(k)∥∗, a very potent regularization is obtained [110, 310].
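The standard computational tool behind such nuclear-norm regularization is singular value thresholding: stack the similar patches as columns of a matrix, shrink its singular values, and reassemble. The group construction below is an illustrative toy, not any specific published algorithm.

import numpy as np

def singular_value_threshold(patch_group, tau):
    """Proximal step for the nuclear norm: soft-threshold the singular
    values of a matrix whose columns are similar (vectorized) patches."""
    U, s, Vt = np.linalg.svd(patch_group, full_matrices=False)
    s_shrunk = np.maximum(s - tau, 0.0)
    return (U * s_shrunk) @ Vt

# toy usage: 16 noisy copies of the same 8x8 patch form a nearly rank-1 group
rng = np.random.default_rng(0)
clean_patch = rng.normal(size=64)
group = np.stack([clean_patch + 0.3 * rng.normal(size=64) for _ in range(16)], axis=1)
denoised_group = singular_value_threshold(group, tau=2.0)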
Table B.1: Evolution of priors for images.

Years     | Core concept                | Formula for ρ(·)                                    | Representative reference
~1970     | Energy regularization       | ∥x∥²₂                                               | [251]
1975-1985 | Spatial smoothness          | ∥Lx∥²₂ or ∥D_v x∥²₂ + ∥D_h x∥²₂                     | [154]
1980-1985 | Optimally Learned Transform | ∥Tx∥²₂ = x^T R^{−1} x, where T/R is learned via PCA | [37]
1980-1990 | Weighted smoothness         | ∥Lx∥²_W                                             | [246]
1990-2000 | Robust statistics           | 1^T μ{Lx}, e.g., Huber-Markov                       | [20]
1992-2005 | Total-Variation             | ∫_{v∈Ω} |∇x(v)| dv or 1^T √(|D_v x|² + |D_h x|²)    | [240]
1987-2005 | Other PDE-based options     | ∫_{v∈Ω} g(∇x(v), ∇²x(v)) dv                         | [302]
2005-2009 | Field-of-Experts            | Σ_k λ_k 1^T μ_k{L_k x}                              | [237]
1993-2005 | Wavelet sparsity            | ∥Wx∥₁                                               | [76]
2000-2010 | Self-similarity             | Σ_k Σ_{j∈Ω(k)} d{R_k x, R_j x}                      | [32, 61]
2002-2012 | Sparsity methods            | ∥α∥₀ s.t. x = Dα                                    | [31]
2010-2017 | Low-Rank assumption         | Σ_k ∥X_Ω(k)∥∗                                       | [110]

As a summary, the above-described evolution of the priors has served as the skeleton of image processing, driving the consistent progress of this field over the years. This evolution is characterized by four major and interconnected trends:
1. A migration from the familiar Gaussian distribution to less intuitive heavy-tailed ones;
2. A departure from L2 to sparsity-promoting norms, such as L1;
3. A drift from linear approximation techniques (e.g., PCA) to non-linear ones (e.g., wavelets and sparse modeling); and, above all,
4. A replacement of axiomatic expressions with learned ones.

Appendix C. Landmark Denoisers over the Years. In Figure 4.1 we presented a graph showing the PSNR performance of landmark denoising algorithms over the years. Below we provide more information on these techniques for completeness of this study. For each of them we give the full reference, describe the core algorithmic idea, and provide the PSNR denoising performance on the BSD68 dataset (σ = 25). We should note that in choosing the methods to include in this list, we restricted the scope to those that report BSD68 results.

KSVD [89] [28.28dB]: Elad, M., & Aharon, M. (2006). Image denoising via sparse and redundant representations over learned dictionaries. IEEE Transactions on Image Processing, 15(12), 3736-3745.
This method decomposes the noisy image into fully overlapping patches and denoises each by sparse approximation via OMP [214], while learning an over-complete dictionary.
The denoised image is obtained by returning the cleaned patches to their original locations, averaging them over the overlaps, and blending with a weighted version of the noisy image.

BM3D [61] [28.57dB]: Dabov, K., Foi, A., Katkovnik, V., & Egiazarian, K. (2007). Image denoising by sparse 3-D transform-domain collaborative filtering. IEEE Transactions on Image Processing, 16(8), 2080-2095.
This algorithm extracts all fully overlapping patches from the noisy image and gathers similar patches into 3D blocks. Denoising is performed by transforming these blocks, forcing sparsity, and then transforming the sparse outcome back to the image domain. The denoised image is obtained by returning the patches to their original locations while averaging over the overlaps. This process is run twice, where the first round serves as an initial cleaning that improves the patch correspondences for the second round.
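Both entries above share the same patch-based scaffolding: extract all overlapping patches, denoise each one, and average them back into place. The sketch below shows only that scaffolding, with a trivial per-patch operation standing in for the sparse-coding or collaborative-filtering step; all names and constants are illustrative.

import numpy as np

def patch_denoise(image, patch_size=8,
                  denoise_patch=lambda p: p.mean() + 0.9 * (p - p.mean())):
    """Overlap-extract / per-patch denoise / overlap-average scaffolding shared by
    patch-based methods. `denoise_patch` is a placeholder for the real per-patch step."""
    h, w = image.shape
    accum = np.zeros_like(image, dtype=float)
    weight = np.zeros_like(image, dtype=float)
    for i in range(h - patch_size + 1):
        for j in range(w - patch_size + 1):
            patch = image[i:i + patch_size, j:j + patch_size]
            cleaned = denoise_patch(patch)
            accum[i:i + patch_size, j:j + patch_size] += cleaned
            weight[i:i + patch_size, j:j + patch_size] += 1.0
    return accum / weight

# toy usage
noisy = np.ones((32, 32)) + 0.1 * np.random.default_rng(0).normal(size=(32, 32))
denoised = patch_denoise(noisy)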
FoE [237] [27.77dB]: Roth, S., & Black, M. J. (2009). Fields of experts. International Journal of Computer Vision, 82(2), 205-229.
FoE (which appeared originally in 2005 [236]) builds a generic prior that mixes several regularizers (called "experts"). The prior's parameters are learned via a contrastive divergence penalty and MCMC sampling. The image denoising itself is obtained by an iterative algorithm that computes the MAP estimate.

LSSC [181] [28.70dB]: Mairal, J., Bach, F., Ponce, J., Sapiro, G., & Zisserman, A. (2009). Non-local sparse models for image restoration. CVPR (pp. 2272-2279).
This algorithm combines the sparse representation (as in KSVD) and non-local similarity (as in BM3D) concepts. It decomposes the noisy image into fully overlapping patches and groups similar patches together. These groups of patches are denoised by a joint sparse approximation that forces the same support over a learned dictionary. The denoised image is obtained by returning the patches to their original locations and averaging over the overlaps.
EPLL [347] [28.71dB]: Zoran, D., & Weiss, Y. (2011). From learning models of natural image patches to whole image restoration. ICCV (pp. 479-486).
EPLL models the distribution of image patches as a Gaussian Mixture Model (GMM), and learns its parameters off-line from a dataset of clean images. Denoising with EPLL is a MAP estimation, posed as a minimization problem with a regularizer consisting of a sum of patch log-likelihoods. This task is solved by applying half-quadratic splitting and iterating between patch denoising and whole-image accumulation steps.

MLP [33] [28.96dB]: Burger, H. C., Schuler, C. J., & Harmeling, S. (2012, June). Image denoising: Can plain neural networks compete with BM3D?. CVPR (pp. 2392-2399).
This is the first effective deep-learning-based method for image denoising. It extracts all fully overlapping patches, as in the classical algorithms, and filters each patch by applying a multi-layer perceptron (a fully connected network). The reconstructed image is obtained by returning the patches to their locations and averaging over the overlapping regions.
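As a rough illustration of the patch-wise multi-layer-perceptron idea (not the exact architecture or training setup of Burger et al.), a fully connected denoiser that maps a flattened noisy patch to a clean estimate could be sketched in PyTorch as follows; the patch size, layer widths, noise level and optimizer settings are arbitrary choices made for the example.

import torch
import torch.nn as nn

PATCH = 17  # patch side length; illustrative, not necessarily the paper's choice

# A plain fully connected patch denoiser: flattened noisy patch in, clean estimate out.
mlp_denoiser = nn.Sequential(
    nn.Linear(PATCH * PATCH, 2048), nn.Tanh(),
    nn.Linear(2048, 2048), nn.Tanh(),
    nn.Linear(2048, PATCH * PATCH),
)

optimizer = torch.optim.SGD(mlp_denoiser.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()
for _ in range(10):  # a real run iterates over a huge corpus of noisy/clean patch pairs
    clean = torch.rand(64, PATCH * PATCH)          # synthetic stand-in for clean patches
    noisy = clean + 0.1 * torch.randn_like(clean)  # additive Gaussian noise
    loss = loss_fn(mlp_denoiser(noisy), clean)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()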
CSF [252] [28.74dB]: Schmidt, U., & Roth, S. (2014). Shrinkage fields for effective image restoration. CVPR (pp. 2774-2781). This algorithm poses a MAP estimation problem using a product of cascaded shrinkage functions as a local prior. The parameters of these functions are learned from a dataset, as in FoE. The algorithm solves the obtained optimization by half-quadratic splitting, iterating between local and global optimization steps.

WNNM [110] [28.83dB]: Gu, S., Zhang, L., Zuo, W., & Feng, X. (2014). Weighted nuclear norm minimization with application to image denoising. CVPR (pp. 2862-2869). This method decomposes an incoming image into fully overlapping patches and groups similar patches, arranging them as the columns of a matrix. The patches are denoised by forcing the rank of each constructed matrix to be small, via minimization of the (weighted) matrix nuclear norm. The reconstructed image is obtained by returning the patches to their original locations while averaging the overlaps.
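The low-rank step at the heart of WNNM can be illustrated with the proximal operator of the plain (unweighted) nuclear norm, which simply soft-thresholds the singular values of the matrix of grouped patches; WNNM itself assigns a different, data-driven threshold to each singular value, so the constant tau below is only an illustrative stand-in.

import numpy as np

def low_rank_denoise_group(patch_matrix, tau):
    """Denoise a matrix whose columns are similar (vectorized) noisy patches by
    soft-thresholding its singular values -- the proximal operator of the plain
    nuclear norm.  WNNM follows the same pipeline but uses a separate, data-driven
    threshold per singular value."""
    U, s, Vt = np.linalg.svd(patch_matrix, full_matrices=False)
    s_shrunk = np.maximum(s - tau, 0.0)  # shrink singular values, pushing the matrix toward low rank
    return (U * s_shrunk) @ Vt

group = np.random.rand(64, 20)  # e.g. twenty similar 8x8 patches stacked as columns
denoised_group = low_rank_denoise_group(group, tau=0.5)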
TNRD [50] [28.92dB]: Chen, Y., & Pock, T. (2016). Trainable nonlinear reaction diffusion: A flexible framework for fast and effective image restoration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(6), 1256-1272. This method builds on the FoE method by unfolding the minimization over its prior, this way defining a parametric trainable network. Once the architecture is defined, TNRD trains this neural network end-to-end in a supervised fashion using clean/noisy pairs of images. Denoising is a simple inference of the resulting machine.

DnCNN [330] [29.23dB]: Zhang, K., Zuo, W., Chen, Y., Meng, D., & Zhang, L. (2017). Beyond a Gaussian denoiser: Residual learning of deep CNN for image denoising. IEEE Transactions on Image Processing, 26(7), 3142-3155. This is the first deep learning method that outperforms classical algorithms by a considerable gap. It filters images by applying a convolutional neural network. The network architecture is composed of convolutional layers followed by batch normalizations and ReLU activations. The network is trained end-to-end using a dataset consisting of noisy/clean image pairs.
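A minimal PyTorch sketch in the spirit of DnCNN is given below: a stack of convolution, batch-normalization and ReLU layers predicts the noise, which is subtracted from the input (residual learning), and the model is trained with an MSE loss on noisy/clean pairs. Depth, width and the training snippet are illustrative choices, not the settings of the original paper.

import torch
import torch.nn as nn

class DnCNNLike(nn.Module):
    """A DnCNN-flavored residual denoiser: stacked conv + batch-norm + ReLU layers
    estimate the noise, which is then subtracted from the input."""
    def __init__(self, depth=8, channels=64):
        super().__init__()
        layers = [nn.Conv2d(1, channels, 3, padding=1), nn.ReLU(inplace=True)]
        for _ in range(depth - 2):
            layers += [nn.Conv2d(channels, channels, 3, padding=1, bias=False),
                       nn.BatchNorm2d(channels),
                       nn.ReLU(inplace=True)]
        layers.append(nn.Conv2d(channels, 1, 3, padding=1))
        self.noise_estimator = nn.Sequential(*layers)

    def forward(self, noisy):
        return noisy - self.noise_estimator(noisy)  # residual learning

model = DnCNNLike()
clean = torch.rand(4, 1, 40, 40)
noisy = clean + 0.1 * torch.randn_like(clean)
loss = nn.functional.mse_loss(model(noisy), clean)  # one supervised step on noisy/clean pairs
loss.backward()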
IRCNN [331] [29.15dB]: Zhang, K., Zuo, W., Gu, S., & Zhang, L. (2017). Learning deep CNN denoiser prior for image restoration. CVPR (pp. 3929-3938). This method is similar to DnCNN, but uses dilated convolutions within the architecture in order to enlarge the receptive field, thus creating an opportunity for non-local processing. The network is trained end-to-end using a dataset consisting of noisy/clean image pairs.

NLRN [167] [29.41dB]: Liu, D., Wen, B., Fan, Y., Loy, C. C., & Huang, T. S. (2018). Non-local recurrent network for image restoration. NeurIPS. This method incorporates the non-local similarity concept into a convolutional recurrent neural network in an explicit way. The denoising is done by recurrently applying convolutions and a weighted averaging of similar regions (as in NLM [32]) in the feature space. The network is trained end-to-end using a dataset consisting of noisy/clean image pairs.
MWCNN [168] [29.41dB]: Liu, P., Zhang, H., Zhang, K., Lin, L., & Zuo, W. (2018). Multi-level wavelet-CNN for image restoration. CVPR Workshop (pp. 773-782). This algorithm incorporates the wavelet sparsity concept into the deep learning approach by combining the U-Net architecture with the multi-level wavelet transform. It replaces the downsampling and upsampling U-Net layers with the 2D discrete wavelet transform and its inverse. The network is trained end-to-end using a dataset consisting of noisy/clean image pairs.
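The wavelet-in-place-of-pooling idea can be illustrated with a single-level 2D Haar transform, which turns one channel into four half-resolution sub-bands and is exactly invertible; the NumPy sketch below only demonstrates this decomposition, not MWCNN's full multi-level architecture.

import numpy as np

def haar_dwt2(x):
    """Single-level 2D Haar transform of an array with even height and width.
    Returns the LL, LH, HL, HH sub-bands, each of half the spatial size."""
    a = x[0::2, 0::2]  # top-left pixel of every 2x2 block
    b = x[0::2, 1::2]  # top-right
    c = x[1::2, 0::2]  # bottom-left
    d = x[1::2, 1::2]  # bottom-right
    ll = (a + b + c + d) / 2.0
    lh = (a - b + c - d) / 2.0
    hl = (a + b - c - d) / 2.0
    hh = (a - b - c + d) / 2.0
    return ll, lh, hl, hh

def haar_idwt2(ll, lh, hl, hh):
    """Inverse of haar_dwt2: perfectly reconstructs the original array."""
    H, W = ll.shape
    x = np.zeros((2 * H, 2 * W), dtype=ll.dtype)
    x[0::2, 0::2] = (ll + lh + hl + hh) / 2.0
    x[0::2, 1::2] = (ll - lh + hl - hh) / 2.0
    x[1::2, 0::2] = (ll + lh - hl - hh) / 2.0
    x[1::2, 1::2] = (ll - lh - hl + hh) / 2.0
    return x

img = np.random.rand(8, 8)
assert np.allclose(haar_idwt2(*haar_dwt2(img)), img)  # the transform is exactly invertible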
N3Net [218] [29.30dB]: Plötz, T., & Roth, S. (2018). Neural nearest neighbors networks. NeurIPS. This method combines the deep learning approach with the non-local self-similarity concept. It introduces a differentiable continuous relaxation of the k-nearest-neighbor (KNN) selection rule and uses it as a building block within the neural network. N3Net's architecture interleaves convolutional blocks with KNN relaxation blocks: the convolutional blocks perform denoising, while the KNN parts augment the feature maps by breaking them into patches, applying patch matching, and finding the k nearest neighbors of each patch. The network is trained end-to-end using a dataset consisting of noisy/clean image pairs.

FFDNet [332] [29.19dB]: Zhang, K., Zuo, W., & Zhang, L. (2018). FFDNet: Toward a fast and flexible solution for CNN-based image denoising. IEEE Transactions on Image Processing, 27(9), 4608-4622. While the architecture of this deep learning method resembles DnCNN, it enlarges the receptive field by reshaping the incoming image into four downsampled sub-images that are fed into the network simultaneously. The network is trained end-to-end using a dataset consisting of noisy/clean image pairs.
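The reversible four-sub-image downsampling that FFDNet applies at its input corresponds to a 2x space-to-depth rearrangement, shown here with PyTorch's pixel_unshuffle/pixel_shuffle pair (FFDNet additionally concatenates a noise-level map as an extra input channel, which is omitted in this sketch).

import torch
import torch.nn.functional as F

# FFDNet-style reversible downsampling: a (N, 1, H, W) image becomes four
# half-resolution sub-images stacked along the channel axis, so the subsequent
# convolutions see a larger effective receptive field at lower cost.
img = torch.rand(1, 1, 64, 64)
subimages = F.pixel_unshuffle(img, downscale_factor=2)   # shape (1, 4, 32, 32)
restored = F.pixel_shuffle(subimages, upscale_factor=2)  # exact inverse, back to (1, 1, 64, 64)
assert torch.equal(restored, img)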
FOCNet [133] [29.38dB]: Jia, X., Liu, S., Feng, X., & Zhang, L. (2019). FOCNet: A fractional optimal control network for image denoising. CVPR (pp. 6054-6063). This algorithm suggests a novel architecture to replace the one used by DnCNN, relying on an interpretation of residual neural networks as solvers of dynamical systems. While DnCNN corresponds to an integer-order ordinary differential equation, FOCNet's architecture poses a fractional optimal control (FOC) problem that translates into better connectivity. The algorithm for solving the equation is implemented by a feed-forward convolutional neural network whose parameters are learned using a dataset of images.

RIDNet [8] [29.34dB]: Anwar, S., & Barnes, N. (2019). Real image denoising with feature attention. ICCV (pp. 3155-3164). This algorithm introduces attention modules into a neural network whose architecture includes convolutional layers and skip connections. The attention is designed to capture feature dependencies and to enhance the weight of the important correspondences. The network is trained end-to-end using a dataset of clean/noisy image pairs.
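RIDNet's feature attention is a form of channel attention; a generic squeeze-and-excitation style module of this kind is sketched below. It shows the general mechanism (global pooling, a small gating network, per-channel rescaling) rather than RIDNet's exact module, and the channel and reduction sizes are arbitrary.

import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    """Generic channel attention: global average pooling summarizes each feature map,
    a small bottleneck predicts per-channel weights, and the features are rescaled."""
    def __init__(self, channels=64, reduction=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.gate = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, 1),
            nn.Sigmoid(),
        )

    def forward(self, features):
        return features * self.gate(self.pool(features))

feats = torch.rand(2, 64, 32, 32)
reweighted = ChannelAttention()(feats)  # same shape, channel-wise reweighted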
GCDN [292] [29.35dB]: Valsesia, D., Fracastoro, G., & Magli, E. (2020). Deep graph-convolutional image denoising. IEEE Transactions on Image Processing, 29, 8226-8237. This method combines the deep-learning approach with graph modeling. The GCDN architecture includes convolutional and graph-convolutional layers: while the regular convolutional layers capture local interrelations between pixels, the graph-convolutional ones are designed to capture the non-local dependencies. Each graph-convolutional layer dynamically applies non-local aggregation (graph convolution), where the graph is constructed via a k-nearest-neighbor search over feature vectors, connecting each vertex to the k most similar ones in terms of the L2 norm. The network is trained using a dataset of images.
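The dynamic graph construction used by GCDN can be illustrated by a plain k-nearest-neighbor search over feature vectors; the sketch below builds such a graph once with NumPy, whereas GCDN rebuilds it in feature space at every graph-convolutional layer and then aggregates over the resulting edges.

import numpy as np

def knn_graph(features, k=8):
    """Build a directed k-nearest-neighbor graph over feature vectors using L2
    distances: edges[i] lists the k most similar other vectors to vector i."""
    sq = np.sum(features ** 2, axis=1)
    d2 = sq[:, None] + sq[None, :] - 2.0 * features @ features.T  # pairwise squared distances
    np.fill_diagonal(d2, np.inf)           # exclude self-loops
    edges = np.argsort(d2, axis=1)[:, :k]  # indices of the k nearest neighbors per node
    return edges

feats = np.random.rand(100, 32)  # 100 nodes (e.g. pixels) with 32-dimensional feature vectors
print(knn_graph(feats).shape)    # (100, 8)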
SwinIR [165] [29.50dB]: Liang, J., Cao, J., Sun, G., Zhang, K., Van Gool, L., & Timofte, R. (2021). SwinIR: Image restoration using Swin transformer. CVPR (pp. 1833-1844). This algorithm incorporates non-locality into a convolutional deep-learning architecture using shifted-window (Swin) transformer modules [171]. These modules are designed to compute local self-attention in shifted windows, this way exploiting non-local self-similarity. The SwinIR architecture is trained end-to-end using a dataset consisting of noisy/clean image pairs.

DRUNet [329] [29.48dB]: Zhang, K., Li, Y., Zuo, W., Zhang, L., Van Gool, L., & Timofte, R. (2021). Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence. This denoiser is a bias-free [201] neural network that combines ResNet [117] and U-Net [234]. Its architecture includes convolutions, downscaling and upscaling layers, and skip connections.
The network is trained using a dataset of images.

Appendix D. Approximation of the Score Function by an MMSE Denoiser.

In Section 7 we presented the definition of the score function, \nabla_x \log p(x), and its approximation via a denoiser. Here we provide the derivation of this result, following the work by Miyasawa [200], Stein [265], and Tweedie [84].

Consider an ideal image x \in \mathbb{R}^N drawn from the Probability Density Function (PDF) p(x). Assume that y is a noisy version of it, y = x + v, where v \sim \mathcal{N}(0, \sigma_0^2 I). The PDF of y can be obtained by a marginalization,

(D.1)  p(y) = \int_x p(y|x)\, p(x)\, dx = \left(\frac{1}{2\pi\sigma_0^2}\right)^{N/2} \int_x \exp\left(-\frac{1}{2\sigma_0^2}\|y - x\|_2^2\right) p(x)\, dx.

In the above we used the fact that p(y|x) \sim \mathcal{N}(x, \sigma_0^2 I). The obtained relationship expresses p(y) as a convolution between the original prior p(x) and an isotropic zero-mean Gaussian of width \sigma_0. Taking a derivative of both sides with respect to y results in the following:

(D.2)  \nabla_y p(y) = \left(\frac{1}{2\pi\sigma_0^2}\right)^{N/2} \int_x \nabla_y \exp\left(-\frac{1}{2\sigma_0^2}\|y - x\|_2^2\right) p(x)\, dx = \frac{1}{\sigma_0^2}\left(\frac{1}{2\pi\sigma_0^2}\right)^{N/2} \int_x (x - y) \exp\left(-\frac{1}{2\sigma_0^2}\|y - x\|_2^2\right) p(x)\, dx = \frac{1}{\sigma_0^2} \int_x (x - y)\, p(y|x)\, p(x)\, dx.

Dividing both sides by p(y) leads to

(D.3)  \frac{\nabla_y p(y)}{p(y)} = \nabla_y \log p(y) = \frac{1}{\sigma_0^2} \int_x (x - y)\, \frac{p(y|x)\, p(x)}{p(y)}\, dx = \frac{1}{\sigma_0^2} \int_x (x - y)\, p(x|y)\, dx.

Opening and rearranging the above expression leads to our final result:
(D.4)  \nabla_y \log p(y) = \frac{1}{\sigma_0^2}\left[\int_x x\, p(x|y)\, dx - y \int_x p(x|y)\, dx\right] = \frac{1}{\sigma_0^2}\left[D(y, \sigma_0) - y\right],

where D(y, \sigma_0) should be the optimal Minimum Mean Squared Error (MMSE) denoiser, E(x|y). Thus, access to an approximation of the score function \nabla_x \log p(x) can be obtained by using a small value \sigma_0 and evaluating the above expression with a given denoiser.
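Expression (D.4) translates directly into code: given any denoiser treated as an approximation of the MMSE estimator, the score of the smoothed density is estimated as (D(y, \sigma_0) - y)/\sigma_0^2. The Python sketch below uses a Gaussian filter purely as a crude stand-in for a proper MMSE denoiser such as a trained network; the function names are illustrative.

import numpy as np
from scipy.ndimage import gaussian_filter

def score_from_denoiser(y, denoiser, sigma0):
    """Approximate the score of the smoothed density at y with an (approximate)
    MMSE denoiser, via grad_y log p(y) ~= (D(y, sigma0) - y) / sigma0**2."""
    return (denoiser(y, sigma0) - y) / sigma0 ** 2

def toy_denoiser(y, sigma0):
    # crude stand-in for an MMSE denoiser; in practice this would be a trained network
    return gaussian_filter(y, sigma=1.0)

noisy = np.random.rand(64, 64)
score_estimate = score_from_denoiser(noisy, toy_denoiser, sigma0=0.1)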
REFERENCES

[1] A. Abdelhamed, S. Lin, and M. S. Brown, A high-quality denoising dataset for smartphone cameras, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 1692-1700.
[2] A. Abdelhamed, R. Timofte, and M. S. Brown, NTIRE 2019 challenge on real image denoising: Methods and results, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2019.
[3] H. K. Aggarwal and A. Majumdar, Hyperspectral image denoising using spatio-spectral total variation, IEEE Geoscience and Remote Sensing Letters, 13 (2016), pp. 442-446.
[4] M. Aharon, M. Elad, and A. Bruckstein, K-SVD: An algorithm for designing overcomplete dictionaries for sparse representation, IEEE Transactions on Signal Processing, 54 (2006), pp. 4311-4322.
[5] R. Ahmad, C. A. Bouman, G. T. Buzzard, S. Chan, S. Liu, E. T. Reehorst, and P. Schniter, Plug-and-play methods for magnetic resonance imaging: Using denoisers for image recovery, IEEE Signal Processing Magazine, 37 (2020), pp. 105-116.
[6] T. Amit, E. Nachmani, T. Shaharbany, and L. Wolf, SegDiff: Image segmentation with diffusion probabilistic models, arXiv preprint arXiv:2112.00390, (2021).
[7] F. J. Anscombe, The transformation of Poisson, binomial and negative-binomial data, Biometrika, 35 (1948), pp. 246-254.
[8] S. Anwar and N. Barnes, Real image denoising with feature attention, in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019, pp. 3155-3164.
[9] P. Arias and J.-M. Morel, Video denoising via empirical Bayesian estimation of space-time patches, Journal of Mathematical Imaging and Vision, 60 (2018), pp. 70-93.
[10] P. Arias and J.-M. Morel, Kalman filtering of patches for frame-recursive video denoising, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, 2019.
[11] M. Arjovsky, S. Chintala, and L. Bottou, Wasserstein generative adversarial networks, in Proceedings of the 34th International Conference on Machine Learning, vol. 70 of Proceedings of Machine Learning Research, PMLR, 06-11 Aug 2017, pp. 214-223.
[12] L. Azzari and A. Foi, Variance stabilization for noisy+estimate combination in iterative Poisson denoising, IEEE Signal Processing Letters, 23 (2016), pp. 1086-1090.
[13] U. Bal, Dual tree complex wavelet transform based denoising of optical microscopy images, Biomedical Optics Express, 3 (2012), pp. 3231-3239.
[14] Y. Balaji, S. Nah, X. Huang, A. Vahdat, J. Song, K. Kreis, M. Aittala, T. Aila, S. Laine, B. Catanzaro, et al., eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers, arXiv preprint arXiv:2211.01324, (2022).
[15] M. R. Banham and A. K. Katsaggelos, Digital image restoration, IEEE Signal Processing Magazine, 14 (1997), pp. 24-41.
[16] J. Batson and L. Royer, Noise2Self: Blind denoising by self-supervision, in International Conference on Machine Learning, PMLR, 2019, pp. 524-533.
[17] A. Beck and M. Teboulle, Fast gradient-based algorithms for constrained total variation image denoising and deblurring problems, IEEE Transactions on Image Processing, 18 (2009), pp. 2419-2434.
[18] M. Bertalmío, Denoising of photographic images and video: fundamentals, open challenges and new trends, Springer, 2018.
[19] J. Besag, Markov chain Monte Carlo for statistical inference, Center for Statistics and the Social Sciences, 9 (2001), pp. 24-25.
[20] M. J. Black, G. Sapiro, D. H. Marimont, and D. Heeger, Robust anisotropic diffusion, IEEE Transactions on Image Processing, 7 (1998), pp. 421-432.
[21] T. Blau, R. Ganz, B. Kawar, A. Bronstein, and M. Elad, Threat model-agnostic adversarial defense using diffusion models, arXiv preprint arXiv:2207.08089, (2022).
[22] Y. Blau and T. Michaeli, The perception-distortion tradeoff, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 6228-6237.
[23] A. Bosco, R. Bruna, D. Giacalone, S. Battiato, and R. Rizzo, Signal-dependent raw image denoising using sensor noise characterization via multiple acquisitions, in Digital Photography VI, vol. 7537, SPIE, 2010, pp. 34-43.
[24] L. Bottou et al., Stochastic gradient learning in neural networks, Proceedings of Neuro-Nîmes, 91 (1991), p. 12.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sibarita, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Salamero, Patch- based nonlocal functional for denoising fluorescence microscopy image sequences, IEEE transactions on medical imaging, 29 (2009), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 442–454.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [26] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bouman and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sauer, A generalized gaussian image model for edge-preserving map estimation, IEEE Transactions on image processing, 2 (1993), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 296–310.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [27] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Boyd, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Parikh, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Peleato, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Eckstein, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=', Distributed optimization and sta- tistical learning via the alternating direction method of multipliers, Foundations and Trends® in Machine learning, 3 (2011), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1–122.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [28] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Brifman, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Romano, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, Turning a denoiser into a super-resolver using plug and play priors, in 2016 IEEE International Conference on Image Processing (ICIP), IEEE, 2016, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1404– 1408.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [29] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Brock, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Donahue, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Simonyan, Large scale GAN training for high fidelity natural image synthesis, in International Conference on Learning Representations, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [30] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Brooks, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mildenhall, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xue, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sharlet, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Barron, Unprocessing images for learned raw denoising, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 11036–11045.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [31] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bruckstein, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Donoho, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, From sparse solutions of systems of equations to sparse modeling of signals and images, SIAM review, 51 (2009), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 34–81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [32] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Buades, B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Coll, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Morel, A non-local algorithm for image denoising, in IEEE CVPR, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2, 2005, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 60–65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [33] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Burger, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Schuler, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Harmeling, Image denoising: Can plain neural networks compete with BM3D?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=', in IEEE CVPR, 2012, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2392–2399.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [34] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Buzzard, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chan, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sreehari, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bouman, Plug-and-play unplugged: Optimization-free reconstruction using consensus equilibrium, SIAM Journal on Imaging Sciences, 11 (2018), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2001–2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [35] E.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Cand`es, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ma, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wright, Robust principal component analysis?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=', Journal of the ACM (JACM), 58 (2011), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1–37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [36] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Cao, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tan, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Gao, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Heng, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, A survey on generative diffusion model, arXiv preprint arXiv:2209.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='02646, (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [37] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Castleman, Digital image processing, Prentice Hall Press, 1996.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [38] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Catt´e, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lions, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Morel, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Coll, Image selective smoothing and edge detection by nonlinear diffusion, SIAM Journal on Numerical analysis, 29 (1992), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 182–193.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [39] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chambolle and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Pock, A first-order primal-dual algorithm for convex problems with applications to imaging, Journal of mathematical imaging and vision, 40 (2011), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 120–145.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [40] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chan, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ho, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Nikolova, Salt-and-pepper noise removal by median-type noise detectors and detail-preserving regularization, IEEE Transactions on image processing, 14 (2005), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1479–1485.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [41] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chan, Performance analysis of plug-and-play admm: A graph signal processing perspective, IEEE Transactions on Computational Imaging, 5 (2019), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 274–286.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [42] S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chan, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, and O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elgendy, Plug-and-play ADMM for image restoration: Fixed-point convergence and applications, IEEE Transactions on Computational Imaging, 3 (2016), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 84–98.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [43] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chang, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yu, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vetterli, Adaptive wavelet thresholding for image denoising and compression, IEEE transactions on image processing, 9 (2000), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1532–1546.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [44] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Charbonnier, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Blanc-F´eraud, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Aubert, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Barlaud, Deterministic edge-preserving regularization in computed imaging, IEEE Transactions on image processing, 6 (1997), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 298–311.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [45] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chatterjee and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, Is denoising dead?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=', IEEE Transactions on Image Processing, 19 (2009), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 895–911.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [46] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chatterjee and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, Patch-based near-optimal image denoising, IEEE Transactions on Image Processing, 21 (2011), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1635–1649.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [47] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xiong, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tian, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wu, Deep boosting for image denoising, in Proceedings of the 50 M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' ELAD, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' KAWAR AND G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' VAKSMAN European Conference on Computer Vision (ECCV), 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3–18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [48] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liao, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhou, and G.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, Low-dose ct denoising with convolutional neural network, in 2017 IEEE 14th International Symposium on Biomedical Imaging (ISBI 2017), IEEE, 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 143–146.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [49] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wipf, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Rodrigues, Deep learning for linear inverse problems using the plug-and- play priors framework, in ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), IEEE, 2021, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 8098–8102.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [50] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Pock, Trainable nonlinear reaction diffusion: A flexible framework for fast and effective image restoration, IEEE transactions on pattern analysis and machine intelligence, 39 (2016), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1256–1272.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [51] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Christian, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vandehey, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Floberg, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mistretta, Dynamic pet denoising with hypr processing, Journal of Nuclear Medicine, 51 (2010), pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1147–1154.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [52] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chu, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Glad, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Godtliebsen, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Marron, Edge-preserving smoothers for image processing, Journal of the American Statistical Association, 93 (1998), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 526–541.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [53] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chung, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kim, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mccann, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Klasky, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ye, Diffusion posterior sampling for general noisy inverse problems, arXiv preprint arXiv:2209.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='14687, (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [54] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chung, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sim, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ryu, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ye, Improving diffusion models for inverse problems using manifold constraints, arXiv preprint arXiv:2206.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='00941, (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [55] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Cohen, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Blau, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Freedman, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Rivlin, It has potential: Gradient-driven denoisers for convergent solutions to inverse problems, Advances in Neural Information Processing Systems, 34 (2021), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 18152–18164.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [56] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Cohen, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, Regularization by denoising via fixed-point projection (RED- PRO), SIAM Journal on Imaging Sciences, 14 (2021), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1374–1406.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [57] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Coifman and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Donoho, Translation-invariant de-noising, in Wavelets and statistics, Springer, 1995, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 125–150.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [58] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Corbineau, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bertocchi, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chouzenoux, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Prato, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Pesquet, Learned image deblurring by unfolding a proximal interior point algorithm, in 2019 IEEE International Conference on Image Processing (ICIP), IEEE, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 4664–4668.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [59] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Costantini and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Susstrunk, Virtual sensor design, in Sensors and Camera Systems for Scientific, Industrial, and Digital Photography Applications V, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 5301, SPIE, 2004, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 408–419.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [60] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Croitoru, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Hondru, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ionescu, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Shah, Diffusion models in vision: A survey, arXiv preprint arXiv:2209.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='04747, (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [61] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dabov, A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Foi, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Katkovnik, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Egiazarian, Image denoising by sparse 3-D transform- domain collaborative filtering, IEEE Transactions on image processing, 16 (2007), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2080–2095.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [62] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dai, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Au, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Pang, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yang, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zou, Film grain noise removal and synthesis in video coding, in 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, IEEE, 2010, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 890–893.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [63] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dar, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bruckstein, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Giryes, Postprocessing of compressed images via sequential denoising, IEEE Transactions on Image Processing, 25 (2016), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3044–3058.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [64] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Das, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Pal, A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chakrabarti, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Acharyya, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Basu, Adaptive denoising of 3d volumetric mr images using local variance based estimator, Biomedical Signal Processing and Control, 59 (2020), p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 101901.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [65] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Deledalle, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tupin, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Denis, Poisson nl means: Unsupervised non local means for poisson noise, in 2010 IEEE international conference on image processing, IEEE, 2010, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 801–804.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [66] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Deng, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dong, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Socher, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fei-Fei, ImageNet: A large-scale hierarchical image database, in 2009 IEEE Conference on Computer Vision and Pattern Recognition, 2009, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 248–255.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [67] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dey, D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bhattacharjee, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Nasipuri, Image denoising using generative adversarial network, in Intelligent Computing: Image Processing Based Applications, Springer, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 73–90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [68] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dhariwal and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Nichol, Diffusion models beat GANs on image synthesis, in Thirty-Fifth Conference on Neural Information Processing Systems, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [69] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Divakar and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Venkatesh Babu, Image denoising via CNNs: An adversarial approach, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 80–87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND 51 [70] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Diwakar, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kumar, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Singh, Ct image denoising using nlm and its method noise thresholding, Multimedia Tools and Applications, 79 (2020), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 14449–14464.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [71] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dong, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Shi, Sparsity-based image denoising via dictionary learning and structural clustering, in CVPR 2011, 2011, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 457–464.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [72] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dong, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Shi, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ma, and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, Image restoration via simultaneous sparse coding: Where structured sparsity meets Gaussian scale mixture, International Journal of Computer Vision, 114 (2015), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 217–232.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [73] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dong, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yin, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Shi, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wu, and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lu, Denoising prior driven deep neural network for image restoration, IEEE Transactions on Pattern Analysis and Machine Intelligence, 41 (2018), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2305–2318.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [74] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dong, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Shi, and X.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Khowaja, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yahya, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lee, Cascaded and recursive convnets (crcnn): An effective and flexible approach for image denoising, Signal Processing: Image Communication, 99 (2021), p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 116420.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [149] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kim, An image denoising algorithm for the mobile phone cameras, The Journal of the Korea institute of electronic communication sciences, 9 (2014), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 601–608.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [150] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kingma and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dhariwal, Glow: Generative flow with invertible 1x1 convolutions, Advances in neural information processing systems, 31 (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [151] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kingma and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Welling, Auto-encoding variational bayes, in International Conference on Learning Representations, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [152] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Krull, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-O.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Buchholz, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Jug, Noise2void-learning denoising from single noisy images, in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2129–2137.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [153] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kutyniok and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lim, Image separation using wavelets and shearlets, in International Confer- ence on Curves and Surfaces, Springer, 2010, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 416–430.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [154] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lagendijk and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Biemond, Basic methods for image restoration and identification, in The Essential Guide to Image Processing, Elsevier, 2009, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 323–348.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [155] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Laumont, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' De Bortoli, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Almansa, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Delon, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Durmus, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Pereyra, Bayesian imaging using plug & play priors: when Langevin meets Tweedie, arXiv preprint arXiv:2103.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='04715, (2021).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [156] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lebrun, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Colom, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Buades, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Morel, Secrets of image denoising cuisine, Acta Numerica, 21 (2012), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 475–576.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [157] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lebrun, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Colom, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Morel, The noise clinic: A universal blind denoising algorithm, in 2014 IEEE International Conference on Image Processing (ICIP), IEEE, 2014, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2674–2678.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [158] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lee, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Negishi, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Urakubo, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kasai, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ishii, Mu-net: Multi-scale u-net for two-photon microscopy image denoising and restoration, Neural Networks, 125 (2020), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 92–103.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [159] S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lefkimmiatis, Universal denoising networks: a novel cnn architecture for image denoising, in Pro- ceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3204–3213.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [160] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lehtinen, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Munkberg, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Hasselgren, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Laine, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Karras, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Aittala, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Aila, Noise2noise: Learning image restoration without clean data, in International Conference on Machine Learning, PMLR, 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2965–2974.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [161] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lei, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xing, and Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen, Blind video temporal consistency via deep video prior, ArXiv, abs/2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='11838 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [162] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Levin and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Nadler, Natural image denoising: Optimality and inherent bounds, in IEEE CVPR, 2011, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2833–2840.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [163] A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Levin, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Nadler, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Durand, and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Freeman, Patch complexity, finite pixel correlations IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND 55 and optimal denoising, in European Conference on Computer Vision, Springer, 2012, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 73–86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [164] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Trzasko, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lake, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Blezek, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fletcher, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' McCollough, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Manduca, Adaptive nonlocal means filtering based on local noise level for ct denoising, Medical physics, 41 (2014), p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 011908.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [165] J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Cao, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sun, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Van Gool, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Timofte, SwinIR: Image restoration using swin transformer, in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1833–1844.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [166] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Guo, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Gu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, A decoupled learning scheme for real-world burst denoising from raw images, in European Conference on Computer Vision, Springer, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 150–166.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [167] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wen, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fan, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Loy, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Huang, Non-local recurrent network for image restoration, in Advances in Neural Information Processing Systems, 2018, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1673–1682.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [168] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lin, and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zuo, Multi-level wavelet-cnn for image restoration, 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), (2018), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 886–88609.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [169] X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tanaka, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Okutomi, Single-image noise level estimation for blind denoising, IEEE transactions on image processing, 22 (2013), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 5226–5237.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [170] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Qin, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Anwar, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ji, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kim, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Caldwell, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Gedeon, Invertible denoising network: A light solution for real noise removal, in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2021, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 13365–13374.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [171] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lin, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Cao, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Hu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wei, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lin, and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Guo, Swin transformer: Hierarchical vision transformer using shifted windows, 2021 IEEE/CVF International Conference on Computer Vision (ICCV), (2021), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 9992–10002.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [172] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Luo, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tang, Deep learning face attributes in the wild, in Proceedings of the IEEE International Conference on Computer Vision, 2015, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3730–3738.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [173] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yuan, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tang, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Uyttendaele, and J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sun, Fast burst images denoising, ACM Transactions on Graphics (TOG), 33 (2014), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1–9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [174] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lo´eve, Fonctions al´eatoires de second order, CR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Acad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Paris, 220 (1945).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [175] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Luisier, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Blu, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Unser, A new sure approach to image denoising: Interscale orthonormal wavelet thresholding, IEEE Transactions on image processing, 16 (2007), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 593–606.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [176] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Luisier, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Blu, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Unser, Image denoising in mixed poisson–gaussian noise, IEEE Trans- actions on image processing, 20 (2010), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 696–708.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [177] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Luisier, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vonesch, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Blu, and M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Unser, Fast interscale wavelet denoising of poisson-corrupted images, Signal processing, 90 (2010), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 415–427.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [178] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Luo and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Hu, Score-based point cloud denoising, in Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2021, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 4583–4592.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [179] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Maggioni, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Boracchi, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Foi, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Egiazarian, Video denoising using separable 4d nonlocal spatiotemporal transforms, in Image Processing: Algorithms and Systems IX, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 7870, International Society for Optics and Photonics, 2011, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 787003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [180] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Maggioni, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Huang, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xiao, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fu, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Song, Efficient multi-stage video denoising with recurrent spatio-temporal fusion, 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), (2021), pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3465–3474.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [181] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mairal, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bach, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ponce, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sapiro, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zisserman, Non-local sparse models for image restoration, in IEEE 12th international conference on computer vision, 2009, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2272–2279.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [182] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mairal, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sapiro, Sparse representation for color image restoration, IEEE Trans- actions on Image Processing, 17 (2008), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 53–69.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [183] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mairal, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sapiro, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, Multiscale sparse image representation with learned dictionaries, in 2007 IEEE International Conference on Image Processing, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3, IEEE, 1997, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' III–105.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [184] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Makitalo and A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Foi, Optimal inversion of the anscombe transformation in low-count poisson image denoising, IEEE transactions on Image Processing, 20 (2010), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 99–109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [185] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mallat, A wavelet tour of signal processing, Elsevier, 1999.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [186] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Manifold, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Thomas, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Francis, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Hill, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fu, Denoising of stimulated raman scattering microscopy images via deep learning, Biomedical optics express, 10 (2019), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3860–3874.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [187] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Manj´on, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Coup´e, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mart´ı-Bonmat´ı, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Collins, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Robles, Adaptive non-local 56 M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' ELAD, B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' KAWAR AND G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' VAKSMAN means denoising of mr images with spatially varying noise levels, Journal of Magnetic Resonance Imaging, 31 (2010), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 192–203.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [188] V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mannam, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhu, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Nichols, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sundaresan, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Smith, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bohn, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Howard, Real-time image denoising of mixed poisson–gaussian noise in fluorescence microscopy images using imagej, Optica, 9 (2022), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 335–345.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [189] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Marinˇc, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Srinivasan, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' G¨ul, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Hellge, and W.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Poole, Score-based generative modeling through stochastic differential equations, in International Conference on Learning Representations, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [263] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sreehari, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Venkatakrishnan, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wohlberg, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Buzzard, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Drummy, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Simmons, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bouman, Plug-and-play priors for bright field electron tomography and sparse interpolation, IEEE Transactions on Computational Imaging, 2 (2016), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 408–423.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [264] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Srinivasan and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ebenezer, A new fast and efficient decision-based algorithm for removal of high-density impulse noises, IEEE Signal Processing Letters, 14 (2007), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 189–192.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [265] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Stein, Estimation of the mean of a multivariate normal distribution, The annals of Statistics, (1981), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1135–1151.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [266] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sun, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Du, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Li, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yang, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Mok, Pix2pix generative adversarial network for low dose myocardial perfusion SPECT denoising, Quantitative Imaging in Medicine and Surgery, 12 (2022), p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3539.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [267] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sun, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kamilov, Block coordinate regularization by denoising, in Advances in Neural Information Processing Systems, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 380–390.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [268] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sun, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wohlberg, and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kamilov, An online plug-and-play algorithm for regularized image reconstruction, IEEE Transactions on Computational Imaging, 5 (2019), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 395–408.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [269] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sun, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wu, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wohlberg, and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kamilov, Scalable plug-and-play admm with convergence guarantees, IEEE Transactions on Computational Imaging, 7 (2021), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 849–863.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [270] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tai, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, Memnet: A persistent memory network for image restoration, in Proceedings of the IEEE international conference on computer vision, 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 4539–4547.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [271] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Takeda, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Farsiu, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, Kernel regression for image processing and reconstruction, IEEE Transactions on image processing, 16 (2007), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 349–366.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [272] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Talbot, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Phelippeau, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Akil, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bara, Efficient poisson denoising for photography, in 2009 16th IEEE International Conference on Image Processing (ICIP), IEEE, 2009, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 3881–3884.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [273] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Talebi and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, Global image denoising, IEEE Transactions on Image Processing, 23 (2013), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 755–768.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [274] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tasdizen, Principal neighborhood dictionaries for nonlocal means image denoising, IEEE Transac- tions on Image Processing, 18 (2009), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2649–2660.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [275] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tassano, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Delon, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Veit, Dvdnet: A fast network for deep video denoising, in 2019 IEEE International Conference on Image Processing (ICIP), IEEE, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1805–1809.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [276] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tassano, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Delon, and T.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Veit, Fastdvdnet: Towards real-time deep video denoising without flow estimation, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1354–1363.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [277] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tchebycheff, Sur deux th´eor`emes relatifs aux probabilit´es, Acta Mathematica, 14 (1890), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 305 – 315.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [278] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Teodoro, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bioucas-Dias, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Figueiredo, Scene-adapted plug-and-play algorithm with convergence guarantees, in IEEE 27th International Workshop on Machine Learning for Signal Processing (MLSP), 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [279] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Teodoro, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bioucas-Dias, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Figueiredo, A convergent image fusion algorithm using scene-adapted Gaussian-mixture-based denoising, IEEE Transactions on Image Processing, 28 (2018), pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 451–463.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [280] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Teodoro, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bioucas-Dias, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Figueiredo, Image restoration and reconstruc- tion using targeted plug-and-play priors, IEEE Transactions on Computational Imaging, 5 (2019), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 675–686.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 60 M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' ELAD, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' KAWAR AND G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' VAKSMAN [281] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tian, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fei, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zheng, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zuo, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='-W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lin, Deep learning on image denoising: An overview, Neural Networks, 131 (2020), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 251–275.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [282] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tikhonov and V.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Arsenin, Solution of Ill-posed Problems, Washington: Winston & Sons, 1977.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [283] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tirer and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Giryes, Image restoration by iterative denoising and backward projections, IEEE Transactions on Image Processing, 28 (2018), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1220–1234.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [284] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tomasi and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Manduchi, Bilateral filtering for gray and color images, in Sixth international conference on computer vision (IEEE Cat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 98CH36271), IEEE, 1998, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 839–846.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [285] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Tran, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Nguyen, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Arai, Gan-based noise model for denoising real images, in Proceedings of the Asian Conference on Computer Vision, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [286] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ulyanov, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vedaldi, and V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Lempitsky, Deep image prior, in Proceedings of the IEEE CVPR, 2018, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 9446–9454.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [287] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vahdat, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kreis, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kautz, Score-based generative modeling in latent space, in Neural Information Processing Systems (NeurIPS), 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [288] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vaksman and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, Patch-craft self-supervised training for correlated image denoising, arXiv preprint arXiv:2211.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='09919, (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [289] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vaksman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, LIDIA: Lightweight learned image denoising with instance adaptation, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recog- nition Workshops, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 524–525.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [290] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vaksman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, Patch craft: Video denoising by deep modeling and patch matching, 2021 IEEE/CVF International Conference on Computer Vision (ICCV), (2021), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2137– 2146.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [291] G.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vaksman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zibulevsky, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Elad, Patch ordering as a regularization for inverse problems in image processing, SIAM Journal on Imaging Sciences, 9 (2016), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 287–319.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [292] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Valsesia, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fracastoro, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Magli, Deep graph-convolutional image denoising, IEEE Trans- actions on Image Processing, 29 (2019), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 8226–8237.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [293] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Van Den Oord, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kalchbrenner, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kavukcuoglu, Pixel recurrent neural networks, in International Conference on Machine Learning, PMLR, 2016, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1747–1756.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [294] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vatsa, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Singh, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Noore, Denoising and segmentation of 3d brain images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=', IPCV, 9 (2009), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 561–567.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [295] S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Venkatakrishnan, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bouman, and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wohlberg, Plug-and-play priors for model based reconstruction, in 2013 IEEE Global Conference on Signal and Information Processing, IEEE, 2013, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 945–948.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [296] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Vincent, A connection between score matching and denoising autoencoders, Neural computation, 23 (2011), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1661–1674.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [297] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhou, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, Adaptive non-local means filter for image deblocking, Signal Processing: Image Communication, 28 (2013), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 522–530.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [298] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Huang, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, Practical deep raw image denoising on mobile devices, in European Conference on Computer Vision, Springer, 2020, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1–16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [299] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Bovik, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sheikh, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Simoncelli, Image quality assessment: from error visibility to structural similarity, IEEE transactions on image processing, 13 (2004), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 600–612.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [300] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wang and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, Progressive switching median filter for the removal of impulse noise from highly corrupted images, IEEE Transactions on Circuits and Systems Ii: Analog and Digital Signal Processing, 46 (1999), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 78–80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [301] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wei, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Fu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Yang, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Huang, A physics-based noise formation model for extreme low- light raw denoising, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 2758–2767.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [302] J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Weickert, Anisotropic diffusion in image processing, Stuttgart: Teubner, 1998.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [303] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Whang, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Delbracio, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Talebi, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Saharia, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Dimakis, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Milanfar, Deblurring via stochastic refinement, in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 16293–16303.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [304] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wiener, Extrapolation, interpolation, and smoothing of stationary time series: with engineering applications, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 113, MIT press Cambridge, MA, 1949.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [305] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wright, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ganesh, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Rao, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Peng, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Ma, Robust principal component analysis: Exact recovery of corrupted low-rank matrices via convex optimization, Advances in neural information IMAGE DENOISING: THE DEEP LEARNING REVOLUTION AND BEYOND 61 processing systems, 22 (2009).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [306] J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xie, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Chen, Image denoising and inpainting with deep neural networks, in NIPS, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [307] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, A trilateral weighted sparse coding scheme for real-world image denoising, in Proceedings of the European conference on computer vision (ECCV), 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 20–36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [308] Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Zhang, Denoising convolutional neural network, in 2015 IEEE International Conference on Information and Automation, IEEE, 2015, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' 1184–1187.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' [309] X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Xu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Sun, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Liu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Wohlberg, and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content=' Kamilov, Provable convergence of plug-and-play priors with MMSE denoisers, arXiv preprint arXiv:2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} +page_content='07685, (2020).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/JdE1T4oBgHgl3EQfsAXy/content/2301.03362v1.pdf'} diff --git a/LNFRT4oBgHgl3EQf1jiU/content/tmp_files/2301.13657v1.pdf.txt b/LNFRT4oBgHgl3EQf1jiU/content/tmp_files/2301.13657v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..c0c46780d5be2c6147e65f461c18c3c7d4b96533 --- /dev/null +++ b/LNFRT4oBgHgl3EQf1jiU/content/tmp_files/2301.13657v1.pdf.txt @@ -0,0 +1,3308 @@ +arXiv:2301.13657v1 [math.AP] 31 Jan 2023 +Effective Boundary Conditions for Heat Equation Arising from +Anisotropic and Optimally Aligned Coatings in Three +Dimensions +Xingri Geng a,b +aDepartment of Mathematics, Southern University of Science and Technology, Shenzhen, P.R. China +b Department of Mathematics, National University of Singapore, Singapore +Abstract +We discuss the initial boundary value problem for a heat equation in a domain surrounded by a +layer. The main features of this problem are twofold: on one hand, the layer is thin compared to +the scale of the domain, and on the other hand, the thermal conductivity of the layer is drastically +different from that of the bulk; moreover, the bulk is isotropic, but the layer is anisotropic and +“optimally aligned” in the sense that any vector in the layer normal to the interface is an eigenvector +of the thermal tensor. +We study the effects of the layer by thinking of it as a thickless surface, +on which “effective boundary conditions” (EBCs) are imposed. In the three-dimensional case, we +obtain EBCs by investigating the limiting solution of the initial boundary value problem subject to +either Dirichlet or Neumann boundary conditions as the thickness of the layer shrinks to zero. These +EBCs contain not only the standard boundary conditions but also some nonlocal ones, including the +Dirichlet-to-Neumann mapping and the fractional Laplacian. One of the main features of this work is +to allow the drastic difference in the thermal conductivity in the normal direction and two tangential +directions within the layer. +Keywords. heat equation, thin layer, energy estimates, asymptotic behavior, effective boundary condi- +tions. +AMS subject classifications. 35K05, 35B40, 35B45,74K35. +1 +Introduction +This paper is concerned with the scenario of insulating an isotropic conducting body with a coating whose +thermal conductivity is anisotropic and drastically different from that of the body. Moreover, the coating +is thin compared to the scale of the body, resulting in multi-scales in the spatial variable. The difference +in thermal conductivity and spatial size leads to computational difficulty. Some examples of this type +of situation include cells with their membranes and thermal barrier coatings (TBCs) for turbine engine +blades (see Figure 1). To handle such situations, we view the coating as a thickless surface as its thickness +shrinks to zero, on which “effective boundary conditions” (EBCs) are imposed. These EBCs not only +provide an alternative way for numerical computation but also give us an analytic interpretation of the +effects of the coating. +The main purpose of this work is to find effective boundary conditions rigorously in a three-dimensional +domain. In the article of Chen, Pond, and Wang [4], EBCs were studied in the two-dimensional case when +the coating is anisotropic and “optimally aligned”. However, it is not straightforward to extend their +results in three dimensions because a degenerate equation that never happens in two dimensions arises. 
+E-mail addresses: gengxingri@u.nus.edu.
+This paper treats the case when the domain is three-dimensional and the coating is “optimally aligned” with two tangent diffusion rates that may be different, a case not covered by the previous results.
+[Figure 1: Ω = Ω1 ∪ Ω2. The body Ω1 is surrounded by the coating Ω2 of uniform thickness δ; Γ is the interface, with projection point p and unit outer normal n, and ∂Ω is the outer boundary.]
+To be more specific, we introduce our mathematical model as follows: let the body Ω1 be surrounded by the coating Ω2 with uniform thickness δ > 0, and let the domain be Ω = Ω1 ∪ Ω2 ⊂ R^3, as shown in Figure 1.
+For any finite T > 0, consider the initial boundary value problem with the Dirichlet boundary condition
+\[
+\begin{cases}
+u_t-\nabla\cdot\bigl(A(x)\nabla u\bigr)=f(x,t), & (x,t)\in Q_T,\\
+u=0, & (x,t)\in S_T,\\
+u=u_0, & (x,t)\in \Omega\times\{0\},
+\end{cases}
+\tag{1.1}
+\]
+where Q_T := Ω × (0, T) and S_T := ∂Ω × (0, T). Suppose that u_0 ∈ L^2(Ω), f ∈ L^2(Q_T), and A(x) is the thermal conductivity given by
+\[
+A(x)=
+\begin{cases}
+k\,I_{3\times 3}, & x\in\Omega_1,\\
+\bigl(a_{ij}(x)\bigr)_{3\times 3}, & x\in\Omega_2,
+\end{cases}
+\]
+where k is a positive constant independent of δ > 0, and the positive-definite matrix (a_{ij}(x)) is anisotropic and “optimally aligned” in the coating Ω2, which means that any vector inside the coating normal to the interface is always an eigenvector of A(x); see (1.3) below for the precise definition.
+Moreover, we also consider the initial value problem with the Neumann boundary condition
+\[
+\begin{cases}
+u_t-\nabla\cdot\bigl(A(x)\nabla u\bigr)=f(x,t), & (x,t)\in Q_T,\\
+\dfrac{\partial u}{\partial n_A}=0, & (x,t)\in S_T,\\
+u=u_0, & (x,t)\in \Omega\times\{0\},
+\end{cases}
+\tag{1.2}
+\]
+where n_A is the co-normal vector A(x)n, with n being the unit outer normal vector field on Γ (= ∂Ω1). In this case, the Neumann boundary condition is the same as ∂u/∂n = 0 since the coating is “optimally aligned”; see below.
+In the three-dimensional case, since the thermal tensor A(x) is positive-definite, it has three orthogonal eigenvectors and corresponding eigenvalues. Every eigenvalue measures the thermal conductivity of the coating in the corresponding direction. By saying that the coating Ω2 is optimally aligned, we mean that
+\[
+A(x)\,n(p)=\sigma\,n(p),\qquad \forall\, x\in\Omega_2,
+\tag{1.3}
+\]
+where Ω2 is thin enough and Γ is smooth enough such that the projection p of x onto Γ is unique, and n(p) is the unit outer normal vector of Γ at p. This concept was first introduced by Rosencrans and Wang [18] in 2006.
+Because of the optimally aligned coating, A(x) must have two eigenvectors in the tangent directions. If A(x) has two identical eigenvalues in the tangent directions, then within the coating Ω2 we assume that the thermal tensor A(x) satisfies the
+\[
+\text{Type I condition:}\qquad A(x)\,s(p)=\mu\,s(p),\qquad \forall\, x\in\Omega_2,
+\tag{1.4}
+\]
+where s(p) is an arbitrary unit tangent vector of Γ at p; σ and µ are called the normal conductivity and the tangent conductivity, respectively.
+If A(x) has two different eigenvalues µ1 and µ2 in the tangent directions, then two tangent directions are fixed on Γ. According to the Hairy Ball Theorem in algebraic topology, there is no nonvanishing continuous tangent vector field on even-dimensional n-spheres. Therefore, in this paper, we consider Γ to be a topological torus, that is, any topological space homeomorphic to a torus. Within the coating Ω2, we assume that the thermal tensor A(x) satisfies the
+\[
+\text{Type II condition:}\qquad A(x)\,\tau_1(p)=\mu_1\,\tau_1(p),\qquad A(x)\,\tau_2(p)=\mu_2\,\tau_2(p),
+\tag{1.5}
+\]
+where τ1(p) and τ2(p) are two orthonormal eigenvectors of A(x) in the tangent plane of Γ at p, and µ1 and µ2 are two different tangent conductivities in the corresponding tangent directions.
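+Since A(x) is symmetric and positive-definite, conditions (1.3)-(1.5) can equivalently be recorded in spectral form; the following display is an editorial restatement for the reader's convenience, not taken verbatim from the text:
+\[
+\text{Type I:}\quad A(x)=\sigma\,n(p)\otimes n(p)+\mu\bigl(I-n(p)\otimes n(p)\bigr),
+\qquad
+\text{Type II:}\quad A(x)=\sigma\,n(p)\otimes n(p)+\mu_1\,\tau_1(p)\otimes\tau_1(p)+\mu_2\,\tau_2(p)\otimes\tau_2(p),
+\]
+valid for every x ∈ Ω2 with projection p onto Γ; the normal direction always carries the conductivity σ, while the tangential conductivities are µ (Type I) or µ1 and µ2 (Type II).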
+Throughout this article, Ω1 is fixed and bounded with C2 smooth boundary Γ; the coating Ω2 is +uniformly thick with ∂Ω approaching Γ as δ → 0; σ, µ, µ1 and µ2 are positive functions of δ. +There have been rich, deep, and interesting results about the idea of using EBCs in the literature. It +can date back to the classic book of Carslaw and Jaeger [3], where EBCs were first recorded. Subsequently, +Sanchez-Palencia [19] first investigated the “interior reinforcement problem” for the elliptic and parabolic +equations in a particular case when the reinforcing material is lens-shaped. Following this line of thought, +Brezis, Caffarelli, and Friedman [1] rigorously studied the elliptic problem for both interior and boundary +reinforcement. See Li and Zhang [10, 14] for further development. For the case of a rapid oscillating +thickness of the coating, see [2]. Later on, lots of follow-up works of EBCs for general coatings and +“optimally aligned coatings” emerged (see [4,5,7–9,11–14]). Furthermore, there is also a review paper [20] +that provides a thorough investigation of this topic. +The layout of this paper is as follows. Section 2 is devoted to establishing some basic energy estimates +and a compactness argument, showing that u converges to some v after passing to a subsequence of +{u}δ>0 as δ → 0. In Section 3, we derive effective boundary conditions on Γ × (0, T ) for the case of Type +I condition, in which two auxiliary functions are developed via harmonic extensions. In Section 4, based +on two different harmonic extensions, we address effective boundary conditions on Γ × (0, T ) for the case +of Type II condition. +2 +Weak solutions +In this section, we begin with some a priori estimates, by which a compact argument is established to +study the asymptotic behavior of the weak solution of (1.1) or (1.2). +2.1 +Preliminaries +Before going into energy estimates, we first introduce some important Sobolev spaces: let W 1,0 +2 +(QT ) +be the subspace of functions belonging to L2(QT ) with first order weak derivatives in x also being in +L2(QT ); W 1,1 +2 +(QT ) is defined similarly with the first order weak derivative in t belonging to L2(QT ); +W 1,0 +2,0 (QT ) is the closure in W 1,0 +2 +(QT ) of C∞ functions vanishing near ST , and W 1,1 +2,0 (QT ) is defined +similarly. Furthermore, denote V 1,0 +2,0 (QT ) := W 1,0 +2,0 (QT ) ∩ C +� +[0, T ]; L2(Ω) +� +. +Let us define one more Sobolev space on Q1 +T = Ω1×(0, T ) : V 1,0 +2 +(Q1 +T ) = W 1,0 +2 +(Q1 +T )∩C +� +[0, T ]; L2(Ω1) +� +. +We endow all these spaces with natural norms. +For simplicity, we write +� +QT u(x, t)dxdt instead of +� T +0 +� +Ω u(x, t)dxdt. +Definition 2.1. A function u is said to be a weak solution of the Dirichlet problem (1.1), if u ∈ V 1,0 +2,0 (QT ) +and for any ξ ∈ C∞(QT ) satisfying ξ = 0 at t = T and near ST , it holds that +A[u, ξ] := − +� +Ω +u0ξ(x, 0)dx + +� +QT +(A(x)∇u · ∇ξ − uξt − fξ) dxdt = 0. +(2.1) +3 + +The weak solution of the Neumann problem (1.2) is defined in the same way, except that u ∈ V 1,0 +2 +(QT ), +and ξ ∈ C∞(QT ) satisfies ξ = 0 at t = T . Moreover, for any small δ > 0, (1.1) or (1.2) admits a unique +weak solution u ∈ W 1,0 +2 +(QT ) ∩ C +� +[0, T ]; L2(Ω) +� +. As is well known, u satisfies the following “transmission +conditions” in the weak sense +u1 = u2, +k∇u1 · n = σ∇u2 · n +on Γ, +(2.2) +where u1 and u2 are the restrictions of u on Ω1 × (0, T ) and Ω2 × (0, T ), respectively. 
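+To indicate where the flux condition in (2.2) comes from (a brief sketch, added here; the text states (2.2) without derivation): taking test functions ξ supported near Γ in (2.1) and integrating by parts separately over Ω1 × (0, T) and Ω2 × (0, T) leaves only interface terms, which forces the co-normal flux A(x)∇u · n to be continuous across Γ in the weak sense. Since A = kI in Ω1, while A is symmetric with A(x)n(p) = σn(p) from the Ω2 side by (1.3),
+\[
+(A\nabla u_1)\cdot n = k\,\nabla u_1\cdot n,
+\qquad
+(A\nabla u_2)\cdot n = \nabla u_2\cdot(A\,n) = \sigma\,\nabla u_2\cdot n \quad\text{on } \Gamma,
+\]
+so continuity of the co-normal flux is exactly the second equality in (2.2).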
+2.2 +Basic energy estimates +In the sequel, for notational convenience, let C(T ) represent a generic positive constant depending only +on T ; let O(1) represent a quantity that varies from line to line but is independent of δ. We provide the +following energy estimates for the weak solution of (1.1) or (1.2). +Lemma 2.1. Suppose f ∈ L2(QT ) and u0 ∈ L2(Ω). Then, any weak solution u of (1.1) or (1.2) satisfies +the following inequalities. +(i) max +t∈[0,T ] +� +Ω +u2(x, t)dx + +� +QT +∇u · A(x)∇udxdt ≤ C(T ) +�� +Ω +u2 +0dx + +� +QT +f 2dxdt +� +, +(ii) max +t∈[0,T ] t +� +Ω +∇u · A(x)∇udx + +� +QT +tu2 +tdxdt ≤ C(T ) +�� +Ω +u2 +0dx + +� +QT +f 2dxdt +� +. +Proof. (i) and (ii) can be proved formally by a standard method. Multiplying (1.1) and (1.2) by u and +tut respectively, we perform the integration by parts in both x and t over Ω×(0, T ). By the same analysis +on the Galerkin approximation of u, this formal argument can be made rigorous. Hence, we omit the +details. +We prove our results using only H1 a priori estimates, and higher order estimates are not needed for +Theorem 3.1 and 4.1 here. We refer interested readers to [4, Theorem 5] for more general higher order +estimates for (1.1) and (1.2). +For general coefficients A(x, t) = (aij(x, t))N×N, let aij(x, t) satisfy +� +i,j +aij(x, t)ξiξj ≥ λ0|ξ|2, +for any ξ ∈ RN and some constant λ0 > 0. We also address the regularity results of u near the interface +Γ without rigorous proof. +Theorem 2.1. Let m be an integer with m ≥ 2 and a ∈ (0, 1). +Suppose that Γ ∈ Cm+a, f ∈ +Cm−2+a,(m−2+a)/2 � +Ωh × [0, T ] +� +(h = 1, 2), and aij ∈ Cm−1+a,(m−1+a)/2(Ωh×[0, T ]), then for any t0 > 0, +the weak solution u of (1.1) or (1.2) satisfies +u ∈ Cm+a,(m+a)/2(N h × [t0, T ]), +where N is a narrow neighborhood of Γ and Nh = N ∩ Ωh. +Proof. The proof of the theorem can be found in [4], and hence we omit the details. +2.3 +A compactness argument +We next turn to the compactness of the family of functions {u}δ>0. +Theorem 2.2. Suppose that Γ ∈ C2, u0 ∈ L2(Ω) and f ∈ L2(QT ) with all functions remaining unchanged +as δ → 0. Then, after passing to a subsequence of δ → 0, the weak solution u of (1.1) or (1.2) converges +to some v weakly in W 1,0 +2 +(Ω1 × (0, T )), strongly in C +� +[0, T ]; L2(Ω1) +� +. +4 + +Proof of the theorem 2.2. According to Lemma 2.1, {u}δ>0 is bounded in W 1,0 +2 +(Ω1 × (0, T )). For any +small t0 ∈ (0, T ], {u}δ>0 is also bounded in C([t0, T ]; H1(Ω1)). By Banach-Eberlein theorem, u converges +to some v weakly in C([t0, T ]; H1 +0(Ω1)) after passing to a subsequence of δ → 0. Together with the +compactness of the embedding H1(Ω1) ֒→ L2(Ω1), for any fixed t0, {u}δ>0 is precompact in L2(Ω1). +Furthermore, the functions {u}δ>0 : t ∈ [t0, T ] �→ u(·, t) ∈ L2(Ω1) are equicontinuous because the term +� +QT tu2 +tdxdt is bounded due to Lemma 2.1. Consequently, the generalized Arzela-Ascoli theorem suggests +that after passing to a further subsequence of δ → 0, u → v strongly in C +� +[t0, T ]; L2(Ω1) +� +. +In what follows, it suffices to prove that the strong convergence of u → v is in C +� +[0, T ]; L2(Ω1) +� +. To +this end, we take a sequence un +0 ∈ C∞ +0 (Ω1) such that ∥u0 − un +0∥L2(Ω) ≤ 1 +n + ∥u0∥L2(Ω2), where un +0 = 0 in +Ω2 and ∥∇un +0∥L2(Ω) ≤ C(n). Such un +0 can be constructed by multiplying u0 by cut-off functions in the +outer normal direction of Γ such that the gradient of un +0 is independent of δ. 
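+One concrete way to obtain such a sequence (a sketch added for clarity; it uses density of smooth functions rather than the explicit cut-off mentioned above, but serves the same purpose): by density of C_0^∞(Ω1) in L^2(Ω1), pick w_n ∈ C_0^∞(Ω1) with ∥u0 − w_n∥_{L^2(Ω1)} ≤ 1/n and set u_0^n = w_n in Ω1, u_0^n = 0 in Ω2. Then
+\[
+\|u_0-u_0^n\|_{L^2(\Omega)}\le\|u_0-w_n\|_{L^2(\Omega_1)}+\|u_0\|_{L^2(\Omega_2)}\le\frac1n+\|u_0\|_{L^2(\Omega_2)},
+\qquad
+\|\nabla u_0^n\|_{L^2(\Omega)}=\|\nabla w_n\|_{L^2(\Omega_1)}\le C(n),
+\]
+and both bounds are independent of δ, as required.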
+Then, we decompose u = u1 + u2, where u1 and u2, respectively, are the unique weak solutions of the +following problems: + + + +(u1)t − ∇ · (A(x)∇u1) = 0, +(x, t) ∈ QT, +u1 = 0, +(x, t) ∈ ST , +u1 = u0 − un +0, +(x, t) ∈ Ω × {0}, +(2.3) + + + +(u2)t − ∇ · (A(x)∇u2) = f(x, t), +(x, t) ∈ QT , +u2 = 0, +(x, t) ∈ ST , +u2 = un +0, +(x, t) ∈ Ω × {0}. +(2.4) +By the similar proof as used in Lemma 2.1, we have the energy estimates +∥u1(·, t)∥L2(Ω) ≤ ∥u0 − un +0∥L2(Ω) ≤ 1 +n + ∥u0∥L2(Ω2). +(2.5) +Employing energy estimates on (2.4), we get +� t +0 +� +Ω +(u2)2 +t dxdt + +� +Ω +∇u2(x, t) · A(x)∇u2(x, t)dx ≤ +� t +0 +� +Ω +f 2dxdt + +� +Ω +∇un +0 · A(x)∇un +0 dx +≤ +� t +0 +� +Ω +f 2dxdt + k1 +� +Ω1 +|∇un +0|2dx =: F(f, n). +(2.6) +Combining this with (2.5), for any t ∈ [0, t0], we obtain +∥u2(·, t) − un +0(·)∥2 +L2(Ω) = 2 +� t +0 +� +Ω +(u2(x, t) − un +0(x)) (u2)tdxdt +≤ 2 +�� t +0 +� +Ω +(u2(x, t) − un +0(x))2 +� 1 +2 �� t +0 +� +Ω +(u2)2 +t +� 1 +2 +≤ 2√t0 max +t∈[0,t0] ∥u2(·, t) − un +0(·)∥L2(Ω) (F(f, n)) +1 +2 , +from which it follows that +max +t∈[0,t0] ∥u2(·, t) − un +0(·)∥ ≤ 2√t0 (F(f, n)) +1 +2 . +(2.7) +Finally, for t ∈ [0, t0], it holds from (2.5) and (2.7) that +∥u(·, t) − u0(·)∥L2(Ω1) ≤ ∥u1(·, t)∥L2(Ω1) + ∥u2(·, t) − u0(·)∥L2(Ω1) + ∥u0 − u0(·)∥L2(Ω1) +≤ 2 +n + 2∥u0∥L2(Ω2) + 2√t0 (F(f, n)) +1 +2 . +Because t0 and δ are small enough, ∥u(·, t) − u0(·)∥L2(Ω1) can be arbitrary small for t ∈ [0, t0]. +Using the fact that u → v strongly in C +� +[t0, T ]; L2(Ω1) +� +, we conclude that u → v strongly in +C +� +[0, T ]; L2(Ω1) +� +if we define v(·, 0) = u0. +5 + +3 +EBCs for Type I condition +Throughout this section, we always have the assumption of Type I condition (1.4). Under this condition, +we aim to derive EBCs on Γ × (0, T ) as the thickness of the layer shrinks to zero. +Theorem 3.1. Suppose that A(x) is given in (1.1) or (1.2) and satisfies (1.4). Let u0 ∈ L2(Ω) and +f ∈ L2(QT ) with functions being independent of δ. Assume further that σ and µ satisfy the scaling +relationships +lim +δ→0 σµ = γ ∈ [0, ∞], +lim +δ→0 +σ +δ = α ∈ [0, ∞], +lim +δ→0 µδ = β ∈ [0, ∞]. +Let u be the weak solution of (1.1) or (1.2), then as δ → 0, u → v weakly in W 1,0 +2 +(Ω1 × (0, T )), strongly +in C([0, T ]; L2(Ω1)), where v is the weak solution of +� +vt − k∆v = f(x, t), +(x, t) ∈ Ω1 × (0, T ), +v = u0, +(x, t) ∈ Ω1 × {0}, +(3.1) +subject to the effective boundary conditions on Γ × (0, T ) listed in Table 1. +Table 1: Effective boundary conditions on Γ × (0, T ). +EBCs on Γ × (0, T ) for (1.1). +As δ → 0 +σ +δ → 0 +σ +δ → α ∈ (0, ∞) +σ +δ → ∞ +σµ → 0 +∂v +∂n = 0 +k ∂v +∂n = −αv +v = 0 +√σµ → γ ∈ (0, ∞) +k ∂v +∂n = γJ ∞ +D [v] +k ∂v +∂n = γJ γ/α +D +[v] +v = 0 +σµ → ∞ +∇Γv = 0, +� +Γ +∂v +∂n = 0 +∇Γv = 0, +� +Γ(k ∂v +∂n + αv)dx = 0 +v = 0 +EBCs on Γ × (0, T ) for (1.2). +As δ → 0 +µδ → 0 +µδ → β ∈ (0, ∞) +µδ → ∞ +σµ → 0 +∂v +∂n = 0 +∂v +∂n = 0 +∂v +∂n = 0 +√σµ → γ ∈ (0, ∞) +∂v +∂n = 0 +k ∂v +∂n = γJ β/γ +N +[v] +k ∂v +∂n = γJ ∞ +N [v] +σµ → ∞ +∂v +∂n = 0 +k ∂v +∂n = β∆Γv +∇Γv = 0, +� +Γ +∂v +∂n = 0 +We now focus on the boundary conditions arising in Table 1. The boundary condition ∇Γv = 0 on +Γ × (0, T ) indicates that v is a constant in the spatial variable (but it may depend on t), where ∇Γ is +the surface gradient on Γ. 
The operator ∆Γ is the Laplacian-Beltrami operator defined on Γ, and the +boundary condition k ∂v +∂n = β∆Γv can be understood as a second-order partial differential equation on +Γ, revealing that the thermal flux across Γ in the outer normal direction causes heat accumulation that +diffuses with the diffusion rate β. +J H +D and J H +N , as shown in Table 1, are linear and symmetric operators mapping the Dirichlet value to +the Neumann value. More precisely, for H ∈ (0, ∞), and smooth g defined on Γ, we define +J H +D [g](s) := ΘR(s, 0) +and +J H +N [g](s) := ΠR(s, 0), +where Θ and Π are, respectively, the bounded solutions of +� +ΘRR + ∆ΓΘ = 0, +Γ × (0, H), +Θ(s, 0) = g(s), +Θ(s, H) = 0, +� +ΠRR + ∆ΓΠ = 0, +Γ × (0, H), +Π(s, 0) = g(s), +ΠR(s, H) = 0. +The analytic formulas for J H +D [g] and J H +N [g] are given and deferred to Subsection 3.2. We then define +(J ∞ +D [g], J ∞ +N [g]) := lim +H→∞ +� +J H +D [g], J H +N [g] +� +, +where J ∞ +D [g] = J ∞ +N [g] = − (−∆Γ)1/2 g is the fractional Laplacian-Beltrami defined on g. +6 + +3.1 +Definition, existence and uniqueness of weak solutions of effective models +We define weak solutions of (3.1) together with the boundary conditions in Table 1. +Definition 3.1. Let the test function ξ ∈ C∞(Q1 +T ) satisfy ξ = 0 at t = T . +(1) A function v is said to be a weak solution of (3.1) with the Dirichlet boundary condition v = 0 if +v ∈ V 1,0 +2,0 (Q1 +T ), and for any test function ξ, v satisfies +L[v, ξ] := − +� +Ω1 +u0(x)ξ(x, 0)dx + +� T +0 +� +Ω1 +(k∇v · ∇ξ − vξt − fξ) dxdt = 0. +(3.2) +(2) A function v is said to be a weak solution of (3.1) with the boundary conditions ∇Γv = 0 and +� +Γ(k ∂v +∂n + αv) = 0 if for almost everywhere fixed t ∈ (0, T ), the trace of v on Γ is a constant, and if +∇Γξ = 0 on Γ, it holds that v ∈ V 1,0 +2 +(Q1 +T ) and v satisfies +L[v, ξ] = − +� T +0 +� +Γ +αvξdsdt. +(3) A function v is said to be a weak solution of (3.1) with the boundary condition k ∂v +∂n = B[v], where +B[v] = −αv, or γJ H +D [v], or γJ H +N [v] for H ∈ (0, ∞], if v ∈ V 1,0 +2 +(Q1 +T ) and if for any test function ξ, v +satisfies +L[v, ξ] = +� T +0 +� +Γ +vB[ξ]dsdt. +(4) A function v is said to be a weak solution of (3.1) with the boundary condition k ∂v +∂n = β∆Γv, if +v ∈ V 1,0 +2 +(Q1 +T ) with its trace belonging to L2 � +(0, T ); H1(Γ) +� +, and if for any test function ξ, v satisfies +L[v, ξ] = −β +� T +0 +� +Γ +∇Γv∇Γξdsdt. +A weak solution of (3.1) satisfies the initial value in the sense that v(·, t) → u0(·) in L2(Ω1) as t → 0. +Moreover, the existence and uniqueness of the weak solution of (3.1) with the boundary conditions in +Tables 1 are stated without proof in the following theorem. +Theorem 3.2. Suppose that Γ ∈ C1, u0 ∈ L2(Ω1) and f ∈ L2(Q1 +T ). Then, (3.1) with any boundary +condition in Tables 1 has one and only one weak solution as defined in Definition 3.1. +Proof. For a rigorous proof of the theorem, the reader is referred to [4] (see also [16] and [21]). +Before proceeding further, enlightened by [12], we first begin with a geometric preparation for the +coating Ω2 by introducing the curvilinear coordinates. Now, we define a mapping F +Γ × (0, δ) �→ x = F(p, r) = p + rn(p) ∈ R3, +where p is the projection of x on Γ; n(p) is the unit normal vector of Γ pointing out of Ω1 at p; r is the +distance from x to Γ. +As is well known ( [6], Lemma 14.16), for a small δ > 0, F is a C1 smooth diffeomorphism from +Γ × (0, δ) to Ω2; r = r(x) is a C2 smooth function of x and is seen as the inverse of the mapping +x = F(p, r). 
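+For a concrete illustration of the map F (an added example, not part of the text): if Ω1 is the ball of radius ρ centered at the origin, so that Γ = {|x| = ρ} and n(p) = p/ρ, then for x in the shell Ω2 = {ρ < |x| < ρ + δ},
+\[
+p(x)=\rho\,\frac{x}{|x|},\qquad r(x)=|x|-\rho,\qquad F(p,r)=p+r\,n(p)=\Bigl(1+\frac{r}{\rho}\Bigr)p,
+\]
+which makes explicit that r(x) is smooth in the shell and that F maps Γ × (0, δ) bijectively onto Ω2 in this radially symmetric case.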
By using local coordinates s = (s1, s2) in a typical chart on Γ, we then have +p = p(s) = p(s1, s2), +x = F(p(s), r) = F(s, r), +dx = (1 + 2Hr + κr2)dsdr +in +Ω2, +(3.3) +where ds represents the surface element; H(s)and κ(s) are the mean curvature and Gaussian curvature +at p on Γ, respectively. +7 + +In the curvilinear coordinates, the Riemannian metric tensor at x ∈ Ω2 induced from R3 is defined as +G(s, r) with elements +gij(s, r) = gji(s, r) =< Fi, Fj >R3, +i, j = 1, 2, 3, +where Fi = Fsi for i = 1, 2 and F3 = Fr. Let |G| := detG and gij(s, r) be the element of the inverse +matrix of G, denoted by G−1. +In the curvilinear coordinates (s, r), the derivatives of u are given as follows +∇u = urn + ∇su; +∇su = +� +i,j=1,2 +gij(s, r)usjFsi(s, r) +and +∇Γu = +� +i,j=1,2 +gij(s, 0)usjpsi(s); +(3.4) +∇ · (A(x)∇u) = +σ +� +|G| +�� +|G|ur +� +r + µ∆su; +∆su = ∇s · ∇su = +1 +� +|G| +� +ij=1,2 +�� +|G|gij(s, r)usi +� +sj . +(3.5) +Moreover, if A(x) satisfies Type I condition (1.4), then in Ω2, we have +A(x) = σn(p) ⊗ n(p) + µ +� +ij +gij(s, r)Fsi(s, r) ⊗ Fsj(s, r). +(3.6) +3.2 +Auxiliary functions +Our goal for this subsection is to construct auxiliary functions and estimate their asymptotic behaviors +when the thickness of the thin layer is sufficiently small. Our idea of developing the auxiliary functions +is adapted from [4] via a harmonic extension. +We construct two auxiliary functions for Type I condition (1.4) by defining θ and π. +For every +t ∈ [0, T ], let θ(s, r, t) and π(s, r, t) be bounded solutions of +� σθrr + µ∆Γθ = 0, +Γ × (0, δ), +θ(s, 0, t) = g(s), +θ(s, δ, t) = 0, +� σπrr + µ∆Γπ = 0, +Γ × (0, δ), +π(s, 0, t) = g(s), +πr(s, δ, t) = 0, +(3.7) +where g(s) = g(p(s)) = ξ(s, 0, t). From the maximum principle, θ and π are unique. +Multiplying (3.7) by θ and π respectively, and implementing integration by parts over Γ × (0, δ), we +arrive at +� δ +0 +� +Γ +� +σθ2 +r + µ|∇Γθ|2� += − +� +Γ +σθr(s, 0, t)g(s), +� δ +0 +� +Γ +� +σπ2 +r + µ|∇Γπ|2� += − +� +Γ +σπr(s, 0, t)g(s).(3.8) +Multiplying (3.7) by u respectively and performing the integration by parts again, we get +� δ +0 +� +Γ +(σθrur + µ∇Γθ · ∇Γu) = − +� +Γ +σθr(s, 0, t)u(p(s), t), +� δ +0 +� +Γ +(σπrur + µ∇Γπ · ∇Γu) = − +� +Γ +σπr(s, 0, t)u(p(s), t). +(3.9) +To eliminate σ and µ, we assert r = R +� +σ/µ and plug r into (3.7). Suppressing the time dependence, +this leads to +Θ(s, R) = θ(s, R +� +σ/µ, t), +Π(s, R) = π(s, R +� +σ/µ, t). +Consequently, (3.7) is equivalent to +� ΘRR + ∆ΓΘ = 0, +Γ × (0, h), +Θ(s, 0) = g(s), +Θ(s, h) = 0, +� ΠRR + ∆ΓΠ = 0, +Γ × (0, h), +Π(s, 0) = g(s), +ΠR(s, h) = 0, +(3.10) +8 + +where h := δ +� µ +σ = +µδ +√σµ = +√σµ +σ/δ . We now define two Dirichlet-to-Neumann operators +J h +D[g](s) := ΘR(s, 0) +and +J h +N[g](s) := ΠR(s, 0). +(3.11) +Observe +σθr(s, 0, t) = √σµΘR(s, 0) = √σµJ h +D[g](s), +σπr(s, 0, t) = √σµΠR(s, 0) = √σµJ h +N[g](s). (3.12) +Rigorous formulas for J h +D[g] and J h +N[g] are given in eigenvalues and eigenfunctions of −∆Γ by using +separation of variables, from which it follows that +Θ(s, R) = +∞ +� +n=1 +−gne−√λnh +2sinh(√λnh) +� +e +√λnR − e +√λn(2h−R)� +en(s), +(3.13) +Π(s, R) = +∞ +� +n=1 +gne−√λnh +2cosh(√λnh) +� +e +√λnR + e +√λn(2h−R)� +en(s), +(3.14) +where gn :=< en, g >= +� +Γ engds; λn and en(s) are the eigenvalues and the corresponding eigenfunctions +of the Laplacian-Beltrami operator −∆Γ defined on Γ. +Subsequently, it follows from (3.11) and (3.13) that +J h +D[g](s) = − +∞ +� +n=1 +√λnen(s)gn +tanh(√λnh), +J h +N[g](s) = − +∞ +� +n=1 +� +λnen(s)gntanh( +� +λnh). 
+(3.15) +Furthermore, if h → H ∈ (0, ∞], we have +|J h +D[g](s) − J H +D [g](s)| = +∞ +� +n=1 +� +λnen(s)gn +� +1 +tanh(√λnH) − +1 +tanh(√λnh) +� += |H − h| +∞ +� +n=1 +λnen(s)gn +−4 +(e +√λnh′ − e−√λnh′)2 += O(|H − h|), +(3.16) +for some h′ between h and H. This implies the uniform convergence in h. By using a similar analysis as +above, if h → H ∈ (0, ∞], J h +N[g] converges uniformly to J H +N [g] where J ∞ +D [g] = J ∞ +N [g] := −(−∆Γ)1/2g. +In the follow-up, we are going to estimate the size of the term ΘR(s, 0) and ΠR(s, 0) for a sufficiently +small δ. On one hand, if h is small and h → 0 as δ → 0, then it follows from (3.15) that +����ΘR(s, 0) + g(s) +h +���� ≤ h∥g∥C2(Γ), +|ΠR(s, 0) − h∆Γg| ≤ O(h3). +(3.17) +Combining this with (3.12), we get +√σµΘR(s, 0) = σ +δ +� +−g(s) + O(h2) +� +, +√σµΠR(s, 0) = µδ +� +∆Γg(s) + O(h2) +� +. +(3.18) +On the other hand, if h → H ∈ (0, ∞] as δ → 0, then from the Taylor expansion for Θ(s, R), we +obtain +ΘR(s, 0) = Θ(s, R) − Θ(s, 0) +R +− R +2 ΘRR(s, R), +for some R ∈ [0, R]. Taking R = min{h, 1}, from the maximum principle, we have +∥ΘR∥L∞(Γ) ≤ 2 +R∥Θ∥L∞(Ω2) + R∥ΘRR∥L∞(Ω2) ≤ 3∥g∥C2(Γ) +R +, +9 + +from which it turns out that +√σµ∥ΘR∥L∞(Γ) = O(1)√σµ +R +. +(3.19) +By the similar analysis on ΠR, if h → H ∈ (0, ∞] as δ → 0, then we have +∥ΠR∥L∞(Γ) = O(1). +(3.20) +We end this subsection by mentioning that for H ∈ (0, ∞), J H +D [g] and J H +N [g] are defined for smooth +g. However, it is easy to show that they are also well-defined for given any g ∈ H +1 +2 (Γ) where H +1 +2 (Γ) +is defined by the completion of smooth functions under the H +1 +2 (Γ) norm. Moreover, J H +D and J H +N : +H +1 +2 (Γ) → H− 1 +2 (Γ) are linear and bounded, where H− 1 +2 (Γ) is the dual space of H +1 +2 (Γ). +3.3 +Proof of Theorem 3.1 +The main result of this subsection is to prove Theorem 3.1, in which we derive EBCs on Γ × (0, T ). +Proof of Theorem 3.1. According to Theorem 2.2, the weak solution u of (1.1) or 1.2 converges to some +v weakly in W 1,0 +2 +(Ω1 × (0, T )), and strongly in C +� +[0, T ]; L2(Ω1) +� +after passing to a subsequence of δ > 0. +Thus, given any subsequence of δ, we emphasize that we can ensure that u → v in all above spaces after +passing to a further subsequence. In the further, we show that v is a weak solution of (3.1) with effective +boundary conditions listed in Table 1. By what we have proved in Theorem 3.2, v is unique. The fact +that u → v without passing to any subsequence of δ > 0, is a consequence of the uniqueness. +To derive the EBCs on Γ × (0, T ), we complete our proof in the following two steps: one is for the +Dirichlet problem (1.1), and the other is for the Neumann problem (1.2). +Step 1. Effective boundary conditions for the Dirichlet problem (1.1). +To begin with the proof, we assume that all conditions in Theorem 3.1 hold. Let the test function +ξ ∈ C∞(Ω1 × [0, T ]) with ξ = 0 at t = T , and extend ξ to the domain Ω × [0, T ] by defining +ξ(x, t) = +� +ξ(x, t), +x ∈ Ω1, +θ(p(x), r(x), t), +x ∈ Ω2, +where θ is introduced in (3.7). It is easy to check that ξ ∈ W 1,1 +2,0 (QT ), and ξ is called the harmonic +extension of ξ. +Since u is a weak solution of (1.1), it follows from Definition 2.1 that +A[u, ξ] = − +� +Ω +u0(x)ξ(x, 0)dx + +� T +0 +� +Ω +� +∇ξ · A∇u − uξt − fξ +� +dxdt = 0. 
+(3.21) +Rewrite (3.21) as +� T +0 +� +Ω1 +k∇ξ · ∇udxdt − +� +Ω +u0(x)ξ(x, 0)dx − +� T +0 +� +Ω +(uξt + fξ)dxdt = − +� T +0 +� +Ω2 +∇θ · A∇udxdt.(3.22) +Since u → v weakly in W 1,0 +2 +(Ω1 × (0, T )), and strongly in C +� +[0, T ]; L2(Ω1) +� +as δ → 0, we summarize as + + + + + +� +QT uξtdxdt → +� +Q1 +T vξtdxdt, +� +Q1 +T ∇u · ∇ξdxdt → +� +Q1 +T ∇v · ∇ξdxdt, +� +QT fξdxdt → +� +Q1 +T fξdxdt, +from which the left-hand side of (3.22) is equivalent to +L[v, ξ] := +� T +0 +� +Ω1 +k∇ξ · ∇vdxdt − +� +Ω1 +u0(x)ξ(x, 0)dx − +� T +0 +� +Ω1 +(vξt + fξ)dxdt. +(3.23) +10 + +The remainder of the following focuses on the right-hand side of (3.22). Using the curvilinear coordi- +nates (s, r), by virtue of (3.3), (3.4) and (3.6), we have +RHS := − +� T +0 +� +Ω2 +∇θ · A∇udxdt += − +� T +0 +� +Γ +� δ +0 +(σθrur + µ∇sθ∇su) (1 + 2Hr + κr2)drdsdt += − +� T +0 +� +Γ +� δ +0 +(σθrur + µ∇Γθ∇Γu) − +� T +0 +� +Γ +� δ +0 +(σθrur + µ∇Γθ∇Γu)(2Hr + κr2) +− +� T +0 +� +Γ +� δ +0 +µ(∇sθ∇su − ∇Γθ∇Γu)(1 + 2Hr + κr2) +=:I + II + III. +(3.24) +Due to (3.9) and (3.12), it holds that +I := +� T +0 +Idt = √σµ +� T +0 +� +Γ +u(p(s), t)ΘR(s, 0)dsdt. +(3.25) +Subsequently, in view of (3.8) and (3.19), it follows from Lemma 2.1 that +|II| ≤ +� T +0 +����� +� +Γ +� δ +0 +(σθrur + µ∇Γθ∇Γu)(2Hr + κr2)drds +����� dt +=O(δ) +� T +0 +�� +Γ +� δ +0 +σθ2 +r + µ|∇Γθ|2 +�1/2 �� +Ω +σu2 +r + µ|∇Γu|2 +�1/2 +dt +=O(δ) +� T +0 +1 +√ +t +�� +Γ +σ|θr(s, 0, t)| +�1/2 +dt +=O(δ) +√ +T(σµ)1/4||ΘR||1/2 +L∞(Γ), +(3.26) +where we have used H¨oder inequality. Consequently, using (3.4), (3.8) and (3.19), we have +|III| ≤ +����� +� T +0 +� +Γ +� δ +0 +µ(∇sθ∇su − ∇Γθ∇Γu)(1 + 2Hr + κr2) +����� +=O(δ) +� T +0 +� +Γ +� δ +0 +µ| +� +ij +θsiusj| +=O(δ) +� T +0 +�� +Γ +� δ +0 +σθ2 +r + µ|∇Γθ|2 +�1/2 �� +Ω +σu2 +r + µ|∇Γu|2 +�1/2 +dt +=O(δ) +√ +T(σµ)1/4||ΘR||1/2 +L∞(Γ), +(3.27) +where Lemma 2.1 and H¨oder inequality were used. +To investigate the asymptotic behavior of the right-hand side of (3.22) as δ → 0, we consider the +following cases (1) σ +δ → 0, (2) σ +δ → α ∈ (0, ∞), (3) σ +δ → ∞. +Case 1. σ +δ → 0 as δ → 0. +Subcase (1i). σµ → 0 as δ → 0. In view of (3.4), (3.8), (3.12), (3.18) and (3.19), we have +|RHS| ≤O(1) +� T +0 +�� +Γ +� δ +0 +� +σθ2 +r + µ|∇sθ|2� +�1/2 �� +Γ +� δ +0 +� +σu2 +r + µ|∇su|2� +�1/2 +dt = O( +√ +T)max{ +�σ +δ , (σµ)1/4}, +11 + +where Lemma 2.1 was used. From this, we have L[v, ξ] = 0, implying that v satisfies ∂v +∂n = 0 on Γ×(0, T ). +Subcase (1ii). √σµ → γ ∈ (0, ∞) as δ → 0. In this case, h → ∞. By the weak convergence of u, as +δ → 0, it holds from (3.9) that +I = √σµ +� +Γ +ΘRu → γ +� +Γ +J ∞ +D [ξ]v. +Moreover, combining (3.19), (3.20), (3.26) and (3.27), we have |II + III| → 0 as δ → 0. It turns out that +L[v, ξ] = γ +� T +0 +� +Γ +vJ ∞ +D [ξ], +which means that v satisfies k ∂v +∂n = γJ ∞ +D [v] on Γ × (0, T ). +Subcase (1iii). σµ → ∞. In this case, h → ∞ as δ → 0. Divided both sides of (3.21) by √σµ and +sending δ → 0, we obtain +� T +0 +� +Γ +vJ ∞ +D [ξ] = 0. +Because the range of J ∞ +D [·] contains {en}∞ +n=1 for almost everywhere t ∈ (0, T ), it turns out that ∇Γv = 0 +on Γ. We further choose a special test function ξ such that ξ(s, 0, t) = m(t) for some smooth function +m(t). Then, we construct a linear extension by defining θ(s, r, t) = (1 − r +δ)m(t). 
Consequently, a direct +computation leads to +RHS = − +� T +0 +� +Ω2 +∇θ · A∇udxdt = +� T +0 +σm(t) +δ +�� δ +0 +� +Γ +ur(1 + 2Hr + κr2) +� +dt += +� T +0 +σm(t) +δ +�� +Γ +u +� +dt − +� T +0 +σm(t) +δ +� δ +0 +� +Γ +u(2H + 2κr) +≤σ +δ +� T +0 +m(t) +� +O(1) + O( +√ +δ)∥u(·, t)∥L2(Ω2) +� +dt, +(3.28) +from which we derive L[v, ξ] = 0 as δ → 0. Then, v satisfies +� +Γ +∂v +∂n = 0 on Γ × (0, T ). +Case 2. σ +δ → α ∈ (0, ∞) as δ → 0. +Subcase (2i). σµ → 0 as δ → 0. In this case, h → 0. From (3.18) and (3.24)- (3.27), we have +I → −α +� T +0 +� +Γ +vξ +and +II + III → 0 as δ → 0, +from which it follows that +L[v, ξ] = −α +� T +0 +� +Γ +vξ. +(3.29) +So, v satisfies k ∂v +∂n = −αv on Γ × (0, T ). +Subcase (2ii). √σµ → γ ∈ (0, ∞) as δ → 0. In this case, h → H = γ/α ∈ (0, ∞). By virtue of (3.18) +and (3.24)- (3.27), it holds that +I → γ +� T +0 +� +Γ +vJ γ/α +D +and +II + III → 0 as δ → 0, +from which we get L[v, ξ] = γ � T +0 +� +Γ vJ γ/α +D +[ξ]. So, v satisfies k ∂v +∂n = γJ γ/α[v] on Γ × (0, T ). +Subcase (2iii). σµ → ∞ as δ → 0. In this case, h → ∞. Divided both sides of (3.22) by √σµ and +sending δ → 0, we obtain +� T +0 +� +Γ vJ ∞ +D [ξ] = 0, resulting in ∇Γv = 0 on Γ. Using the same test function +12 + +and the auxiliary function in Subcase (1iii), we obtain L[v, ξ] = −α +� T +0 +� +Γ vξ and ∇Γv = 0 on Γ, which +means v satisfies +� +Γ +� +k ∂v +∂n + αv +� += 0 on Γ × (0, T ). +Case 3. σ +δ → ∞ as δ → 0. +Subcase (3i). √σµ → γ ∈ [0, ∞) as δ → 0. In this case, h → 0. Divided both sides of (3.22) by σ/δ and +sending δ → 0, a combination of (3.8) and (3.24)- (3.27) leads to +δ +σ I → − +� T +0 +� +Γ +vξ = 0, +from which v satisfies v = 0 on Γ × (0, T ). +Subcase (3ii). σµ → ∞ as δ → 0. In this case, h → H ∈ [0, ∞]. If H = 0, then divided both sides of +(3.22) by σ/δ and sending δ → 0, it yields v = 0 on Γ × (0, T ). +If H ∈ (0, ∞], then divided both sides of (3.22) by √σµ and sending δ → 0, we have +I(t) +√σµ → +� T +0 +� +Γ +vJ H +D [ξ] = 0. +Employing the argument analogous to that in Subcase (1iii), for almost everywhere t ∈ (0, T ), we have +∇Γv = 0 and +� T +0 +� +Γ vm(t) = 0, which implies v = 0 on Γ × (0, T ). +Step 2. Effective boundary conditions for the Neumann problem (1.2). +Let ξ ∈ C∞(Ω1 × [0, T ]) with ξ = 0 at t = T and extend the test function ξ to Ω × [0, T ] by defining +ξ(x, t) = +� +ξ(x, t), +x ∈ Ω1, +π(p(x), r(x), t), +x ∈ Ω2, +where π is introduced in (3.7). It is easy to see that ξ ∈ W 1,1 +2,0 (QT ). +Thanks to the weak convergence of {u}δ>0, as δ → 0, it follows from Definition 3.1 that +L[u, ξ] → L[v, ξ] = −lim +δ→0 +� T +0 +� +Ω2 +∇π · A∇udxdt. +(3.30) +In the following, we focus on the right-hand side of (3.30). By using the curvilinear coordinates (s, r) in +(3.3), it can be rewritten as +RHS := − +� T +0 +� +Γ +� δ +0 +(σπrur + µ∇sπ∇su) (1 + 2Hr + κr2) +− +� T +0 +� +Γ +� π +0 +(σπrur + µ∇Γπ∇Γu) − +� T +0 +� +Γ +� δ +0 +(σπrur + µ∇Γπ∇Γu)(2Hr + κr2) +− +� T +0 +� +Γ +� δ +0 +µ(∇sπ∇su − ∇Γπ∇Γu)(1 + 2Hr + κr2) +=:I + II + III. +(3.31) +As noted, write down +I = − +� δ +0 +� +Γ +(σπrur + µ∇Γπ∇Γu) = √σµ +� +Γ +u(p(s), t)ΠR(s, 0). +(3.32) +Using the same estimates as in (3.26) and (3.27), we get +|II + III| ≤O(δ) +� T +0 +1 +√ +t +�� +Γ +σ|πr(s, 0, t)| +�1/2 +dt = O(δ) +√ +T(σµ)1/4||ΠR||1/2 +L∞(Γ). +(3.33) +13 + +Next, we consider the following cases (1)σµ → 0, (2)√σµ → γ ∈ (0, ∞), (3)σµ → ∞. +Case 1. σµ → 0 as δ → 0. 
By (3.8), (3.18) and (3.19), we have +RHS ≤ O(1) +� T +0 +�� δ +0 +� +Γ +σπ2 +r + µ|∇Γπ|2 +�1/2 �� +Ω +∇u · A∇u +�1/2 +dt = O(1) +√ +T(σµ)1/4, +where H¨older inequality and Lemma 2.1 were used. So, we have L[v, ξ] = 0, implying v satisfies ∂v +∂n = 0 +on Γ × (0, T ). +Case 2. √σµ → γ ∈ (0, ∞) as δ → 0. +Subcase (2i). µδ → 0 as δ → 0. In this case, h → 0. In terms of (3.18), (3.19), (3.32) and (3.33), we have +I → 0 and |II + III| → 0, from which we have L[v, ξ] = 0. So, v satisfies ∂v +∂n = 0 on Γ × (0, T ). +Subcase (2ii). +µδ → β ∈ (0, ∞] as δ → 0. +In this case, h → H = β/γ ∈ (0, ∞]. +As δ → 0, +it follows from (3.19) and (3.33) that I → γ +� T +0 +� +Γ vJ β/γ +N +[ξ] and |II + III| → 0, from which we get +L[v, ξ] = γ +� T +0 +� +Γ vJ β/γ +N +[ξ]. So, v satisfies k ∂v +∂n = γJ β/γ +N +[v] on Γ × (0, T ). +Case 3.σµ → ∞ as δ → 0. +Subcase (3i). µδ → β ∈ [0, ∞). In this case, h → 0. By virtue of (3.18) and (3.32), it holds that +I = µδ +� T +0 +� +Γ +� +∆Γξ + O(h2) +� +u → β +� T +0 +� +Γ +v∆Γξ. +Additionally, by (3.18) and (3.33), |II + III| → 0 as δ → 0. Consequently, we get +L[v, ξ] = β +� T +0 +� +Γ +v∆Γξ. +(3.34) +Our next task is to prove that v is the weak solution of (3.1) with the boundary condition k ∂v +∂n = β∆Γv +on Γ × (0, T ). To this end, it remains to show v ∈ L2 � +(0, T ); H1(Γ) +� +. +We start by asserting that v is the unique weak solution of (3.1), which satisfies (3.34) as well. +It suffices to prove v = v. +Now consider v − v, without loss of generality, also denoted by v. +We +then points out that v is the weak solution of (3.1) with u0 = f = 0. In particular, by Lemma 2.1, +v ∈ V 1,0 +2 +(Ω1 × (0, T )) ∩ W 1,1 +2 +(Ω1 × (t0, T )). +For any small t0 ∈ (0, T ), fix t1 ∈ (t0, T ]. As δ → 0, (3.22) is transformed into +� t1 +t0 +� +Ω1 +(vtξ + k∇v∇ξ)dxdt = β +� t1 +t0 +� +Γ +v∆Γξdsdt. +(3.35) +Furthermore, take the test function ξ = w(s, t)η(r) with the following assumptions: η = η(r) is a cut-off +function in the r variable with 0 ≤ η ≤ 1, satisfying η ∈ C∞(−∞, 0], η = 1 for −ǫ ≤ r ≤ 0 and η = 0 for +r ≤ −2ǫ; w(s, t) ∈ C2(Γ × [0, T ]). From (3.35), we are led to +β +���� +� t1 +t0 +� +Γ +v∆Γξdsdt +���� = +���� +� t1 +t0 +� +Ω1 +(vtξ + k∇v∇ξ)dxdt +���� ≤ C∥v∥W 1,1 +2 +(Ω1×(t0,t1))∥w∥L2((t0,t1);H1(Γ)).(3.36) +Consider such w with +� t1 +t0 +� +Γ +wdsdt = 0. +We then define a linear functional: w → +� t1 +t0 +� +Γ v∆Γwdsdt, which is well-defined by (3.36). This functional +can be extended to the Hilbert space +H = {w ∈ L2 � +(t0, t1); H1(Γ) +� +: +� t1 +t0 +� +Γ +wdsdt = 0} +14 + +with the inner product as < w1, w2 >:= − +� t1 +t0 +� +Γ ∇Γw1 · ∇Γw2. From Riesze representation theorem, +there is some z ∈ H satisfying +− +� t1 +t0 +� +Γ +∇Γz · ∇Γwdsdt = +� t1 +t0 +� +Γ +v∆Γwdsdt = +� t1 +t0 +� +Γ +z∆Γwdsdt. +(3.37) +Consequently, it follows from (3.37) that +� t1 +t0 +� +Γ(v − z)∆Γw = 0. By Riesze theorem again, this means +that v − z = m(t) for some function m(t) ∈ H and thus v ∈ L2 � +(0, T ); H1(Γ) +� +. Going back to (3.35), +from Lemma 2.1, we have +� +Ω1 +v2(x, t1)dxdt ≤ +� +Ω1 +v2(x, t0)dxdt, +from which we are done by sending t0 → 0 for Subcase (3i). +Subcase (3ii). µδ → ∞ as δ → 0. In this case, h → H ∈ [0, ∞] after passing to a subsequence. If H = 0, +then divided both sides of the equation (3.28) by µδ and sending δ → 0, we obtain +� T +0 +� +Γ v∆Γξ = 0, +implying that v(·) = m(t) on Γ for almost everywhere t ∈ (0, T ). 
+If H ∈ (0, ∞], then divided both sides of (3.28) by √σµ and sending δ → 0, we obtain +� T +0 +� +Γ vJ H +N [ξ] = +0, implying that v(·) = m(t) on Γ for almost everywhere t ∈ (0, T ). We further take a special test function +ξ = ξ(t) on Γ and a constant extension in Ω2 such that ξ = ξ(t), resulting in L[v, ξ] = 0. So, v satisfies +� +Γ +∂v +∂n = 0 on Γ × (0, T ). +Therefore, we accomplish the whole proof. +We conclude this section by asking a natural question: what is the effective boundary condition if +two eigenvalues of the coating in the tangent directions are not identical? That is to say, A(x) has two +different eigenvalues in the tangent directions. We answer this question by considering Type II condition +(1.5) in the next section. +4 +EBCs for Type II condition +In this section, we always assert that Γ is a topological torus and A(x) satisfies Type II condition (1.5). +The aim of this section is to address EBCs on Γ × (0, T ) as the thickness of the layer decreases to zero. +With the aid of the curvilinear coordinates (s, r), we choose a global parametrization p(s1, s2) on Γ +such that +τττ 1 = ps1 +|ps1|, +τττ 2 = ps2 +|ps2|. +More precisely, let Γ := Γ1 × Γ2 satisfy Γ1 = {p(s1, 0)|s1 ∈ [0, l1)} and Γ2 = {p(0, s2)|s2 ∈ [0, l2)}, where +p(s) is l1−periodic in s1 and l2−periodic in s2. In Ω2, the explicit formula of A(x) can be expressed as +A(x) = σn(p) ⊗ n(p) + µ1τττ 1(p) ⊗ τττ 1(p) + µ2τττ 2(p) ⊗ τττ 2(p). +Theorem 4.1. Suppose that Γ is a topological torus and A(x) is given in (1.1) or (1.2) and satisfies +(1.5). Let u0 ∈ L2(Ω) and f ∈ L2(QT ) with functions being independent of δ. Assume further that +without loss of generality, µ1 > µ2. Moreover, σ, µ1, and µ2 satisfy the scaling relationships +lim +δ→0 +µ2 +µ1 += c ∈ [0, 1], +lim +δ→0 +σ +δ = α ∈ [0, 1], +lim +δ→0 σµi = γi ∈ [0, ∞], +lim +δ→0 µiδ = βi ∈ [0, ∞], +i = 1, 2. +(i) If c ∈ (0, 1], then as δ → 0, u → v weakly in W 1,0 +2 +(Ω1 × (0, T )), strongly in C([0, T ]; L2(Ω1)), where v +is the weak solution of (3.1) subject to the effective boundary conditions listed in Table 2. +(ii) If c = 0 and lim +δ→0δ2µ1/µ2 = 0, then u → v weakly in W 1,0 +2 +(Ω1 × (0, T )), strongly in C([0, T ]; L2(Ω1)), +where v is the weak solution of (3.1) subject to the effective boundary conditions listed in Table 3. +15 + +Table 2: Effective boundary conditions on Γ × (0, T ) for c ∈ (0, 1]. +EBCs on Γ × (0, T ) for (1.1). +As δ → 0 +σ +δ → 0 +σ +δ → α ∈ (0, ∞) +σ +δ → ∞ +σµ1 → 0 +∂v +∂n = 0 +k ∂v +∂n = −αv +v = 0 +√σµ1 → γ1 ∈ (0, ∞) +k ∂v +∂n = γ1K∞ +D [v] +k ∂v +∂n = γ1Kγ1/α +D +[v] +v = 0 +σµ1 → ∞ +∇Γv = 0, +� +Γ +∂v +∂n = 0 +∇Γv = 0, +� +Γ(k ∂v +∂n + αv)dx = 0 +v = 0 +EBCs on Γ × (0, T ) for (1.2). +As δ → 0 +µ1δ → 0 +µ1δ → β1 ∈ (0, ∞) +µ1δ → ∞ +σµ1 → 0 +∂v +∂n = 0 +∂v +∂n = 0 +∂v +∂n = 0 +√σµ1 → γ1 ∈ (0, ∞) +∂v +∂n = 0 +k ∂v +∂n = γ1Kβ1/γ1 +N +[v] +k ∂v +∂n = γ1K∞ +N [v] +σµ1 → ∞ +∂v +∂n = 0 +k ∂v +∂n = β1 +� +∂2v +∂τττ 2 +1 + c ∂2v +∂τττ 2 +2 +� +∇Γv = 0, +� +Γ +∂v +∂n = 0 +Table 3: Effective boundary conditions on Γ × (0, T ) for c = 0. +EBCs on Γ × (0, T ) for (1.1). 
+As δ → 0 +σ +δ → 0 +σ +δ → α ∈ (0, ∞) +σ +δ → ∞ +σµ1 → 0 +∂v +∂n = 0 +k ∂v +∂n = −αv +v = 0 +√σµ1 → γ1 ∈ (0, ∞) +k ∂v +∂n = γ1Λ∞ +D [v] +k ∂v +∂n = γ1Λγ1/α +D +[v] +v = 0 +σµ1 → ∞, σµ2 → 0 +∂v +∂τττ 1 = 0, +� +Γ1 +∂v +∂n = 0 +∂v +∂τττ 1 = 0, +� +Γ1 +� ∂v +∂n + αv +� += 0 +v = 0 +σµ1 → ∞, +√σµ2 → γ2 ∈ (0, ∞) +∂v +∂τττ 1 = 0, +� +Γ1 +� +k ∂v +∂n − γ2D∞ +D [v] +� += 0 +∂v +∂τττ 1 = 0, +� +Γ1 +� +k ∂v +∂n − γ2Dγ2/α +D +[v] +� += 0 +v = 0 +σµ1 → ∞, σµ2 → ∞ +∇Γv = 0, +� +Γ +∂v +∂n = 0 +∇Γv = 0, +� +Γ +∂v +∂n = 0 +v = 0 +EBCs on Γ × (0, T ) for (1.2). +As δ → 0 +µ1δ → 0 +µ1δ → β1 ∈ (0, ∞) +µ1δ → ∞ +σµ1 → 0 +∂v +∂n = 0 +∂v +∂n = 0 +∂v +∂n = 0 +√σµ1 → γ1 ∈ (0, ∞) +∂v +∂n = 0 +k ∂v +∂n = γ1Λβ1/γ1 +N +[v] +k ∂v +∂n = γ1Λ∞ +N [v] +σµ1 → ∞ +∂v +∂n = 0 +k ∂v +∂n = β1 ∂2v +∂τττ 2 +1 +see next table +As µ1δ → ∞, σµ1 → ∞ +µ2δ → 0 +µ2δ → β2 ∈ (0, ∞) +µ2δ → ∞ +σµ2 → 0 +∂v +∂τττ 1 = 0, +� +Γ1 +∂v +∂n = 0 +∂v +∂τττ 1 = 0, +� +Γ1 +∂v +∂n = 0 +∂v +∂τττ 1 = 0, +� +Γ1 +∂v +∂n = 0 +√σµ2 → γ2 ∈ (0, ∞) +∂v +∂τττ 1 = 0, +� +Γ1 +∂v +∂n = 0 +∂v +∂τττ 1 = 0, +� +Γ1 +� +k ∂v +∂n − γ2Dβ2/γ2 +N +[v] +� += 0 +∂v +∂τττ 1 = 0, +� +Γ1 +� +k ∂v +∂n − γ2D∞ +N [v] +� += 0 +σµ2 → ∞ +∂v +∂τττ 1 = 0, +� +Γ1 +∂v +∂n = 0 +∂v +∂τττ 1 = 0, +� +Γ1 +� +k ∂v +∂n − β2 ∂2v +∂τττ 2 +� += 0 +∇Γv = 0, +� +Γ +∂v +∂n = 0 +16 + +The boundary condition +∂v +∂τττ 1 = 0 on Γ × (0, T ) means that v is a constant in s1 on Γ, but it may +depend on s2 and t. The boundary condition k ∂v +∂n = β1 +� +∂2v +∂τττ 2 +1 + c ∂2v +∂τττ 2 +2 +� +can be viewed as a second-order +partial differential equation on Γ. +For H ∈ (0, ∞], with a smooth g(s) being l1−periodic in s1 and l2−periodic in s2, KH +D and KH +N +in Table 2 are defined by +� +KH +D[g], KH +N[g] +� +(s) := (ΨR(s, 0), ΦR(s, 0)), where Ψ and Φ are, respectively, +bounded solutions of +� +ΨRR + Ψs1s1 + cΨs2s2 = 0, +R2 × (0, H), +Ψ(s, 0) = g(s), +Ψ(s, H) = 0, +� +ΦRR + Φs1s1 + cΦs2s2 = 0, +R2 × (0, H), +Φ(s, 0) = g(s), +ΦR(s, H) = 0. +ΛH +D and ΛH +N in Table 3 are defined by +� +ΛH +D[g], ΛH +N[g] +� +(s) := +� +Ψ0 +R(s, 0), Φ0 +R(s, 0) +� +, where Ψ0 and Φ0 are +the bounded solutions of +� +Ψ0 +RR + Ψ0 +s1s1 = 0, +R2 × (0, H), +Ψ0(s, 0) = g(s), +Ψ0(s, H) = 0, +� +Φ0 +RR + Φ0 +s1s1 = 0, +R2 × (0, H), +Φ0(s, 0) = g(s), +Φ0 +R(s, H) = 0. +Finally, DH +D and DH +N are defined by +� +DH +D[g], DH +N [g] +� +(s2) := (ΨR(s2, 0), ΦR(s2, 0)), where Ψ(s2, R) and +Φ(s2, R) are the bounded solutions of +� ΨRR + Ψs2s2 = 0, +R × (0, H), +Ψ(s2, 0) = g(s2), +Ψ(s2, H) = 0, +� ΦRR + Φs2s2 = 0, +R × (0, H), +Φ(s2, 0) = g(s2), +ΦR(s2, H) = 0. +4.1 +Definition, existence and uniqueness of weak solutions of effective models +We define weak solutions of (3.1) together with some new boundary conditions from Table 2 and 3. +Definition 4.1. Let the test function ξ ∈ C∞(Q1 +T ) satisfy ξ = 0 at t = T . +(1) A function v is said to be a weak solution of (3.1) with the boundary conditions +∂v +∂τττ 1 = 0 and +� +Γ1 +� +k ∂v +∂n − B[v] +� += 0, where B[v] = −αv, γ2DH +D[v] or γ2DH +N [v] for H ∈ (0, ∞], if v ∈ V 1,0 +2 +(Q1 +T ) and for +almost everywhere fixed t ∈ (0, T ), the trace of v on Γ is a constant in s1, and if for any test function ξ +satisfying +∂ξ +∂τττ 1 = 0 on Γ, v satisfies +L[v, ξ] = +� T +0 +� +Γ +vB[ξ]dsdt. +(2) A function v is said to be a weak solution of (3.1) with the boundary condition k ∂v +∂n = B[v], where +B[v] = γ1KH +D[v](KH +N [v]), or γ1ΛH +D[v](ΛH +N[v]) for H ∈ (0, ∞], if v ∈ V 1,0 +2 +(Q1 +T ) and if for any test function +ξ, v satisfies +L[v, ξ] = +� T +0 +� +Γ +vB[ξ]dsdt. 
+(3) A function v is a weak solution of (3.1) with the boundary condition k ∂v +∂n = β1 +� +∂2v +∂τττ 2 +1 + c ∂2v +∂τττ 2 +2 +� +for +c ∈ [0, 1], if v ∈ V 1,0 +2 +(Q1 +T ) with its trace belonging to L2 � +(0, T ); H1(Γ) +� +, and if for any test function ξ, v +satisfies +L[v, ξ] = −β1 +� T +0 +� +Γ +� ∂v +∂τττ 1 +∂ξ +∂τττ 1 ++ c ∂v +∂τττ 2 +∂ξ +∂τττ 2 +� +dsdt. +(4) A function v is said to be a weak solution of (3.1) with the boundary conditions +∂v +∂τττ 1 = 0 and +� +Γ1 +� +k ∂v +∂n − β2 ∂2v +∂τττ 2 +2 +� += 0, if v ∈ V 1,0 +2 +(Q1 +T ) with its trace belonging to L2 � +(0, T ); H1(Γ) +� +and being a +constant in s1, and if for any test function ξ satisfying +∂ξ +∂τττ 1 = 0 on Γ, v satisfies +L[v, ξ] = −β2 +� T +0 +� +Γ +∂v +∂τττ 2 +∂ξ +∂τττ 2 +dsdt. +Theorem 3.2 also works for the existence and uniqueness of weak solutions of (3.1) together with +above boundary conditions. +17 + +4.2 +Auxiliary functions +We are now in a position to construct two auxiliary functions for Type II condition (1.5). For every +t ∈ [0, T ], let ψ(s, r, t) and φ(s, r, t) be bounded solutions of +� +σψrr + µ1ψs1s1 + µ2ψs2s2 = 0, +R2 × (0, δ), +ψ(s, 0, t) = g(s), +ψ(s, δ, t) = 0, +(4.1) +� σφrr + µ1φs1s1 + µ2φs2s2 = 0, +R2 × (0, δ), +φ(s, 0, t) = g(s), +φr(s, δ, t) = 0, +(4.2) +where g(s) := ξ(s, 0, t) is l1−periodic in s1 and l2−periodic in s2. Let r = R +� +σ/µ1 and suppress the +time dependence. Then, we define +Ψδ(s, R) := ψ(s, R +� +σ/µ1, t), +Φδ(s, R) := φ(s, R +� +σ/µ1, t). +Plugging r into (4.1) and (4.2) leads to +� Ψδ +RR + Ψδ +s1s1 + µ2 +µ1 Ψδ +s2s2 = 0, +R2 × (0, h1), +Ψδ(s, 0) = g(s), +Ψδ(s, h1) = 0, +(4.3) +� Φδ +RR + Φδ +s1s1 + µ2 +µ1 Φδ +s2s2 = 0, +R2 × (0, h1), +Φδ(s, 0) = g(s), +Φδ +R(s, h1) = 0, +(4.4) +where h1 = δ +� +σ/µ1. We next estimate the size of Ψδ +R(s, 0) and Φδ +R(s, 0) when the thickness of the thin +layer is sufficiently small. +In the case that h1 → 0 as δ → 0, from the maximum principle, we get +����Ψδ +R(s, 0) + g(s) +h1 +���� = +����� +1 +h1 +� h1 +0 +Ψδ +R(s, 0) − Ψδ +R(s, 0)dR +����� ≤ h1∥Ψδ +RR∥L∞(R2×(0,h)) ≤ h1∥g∥C2(R2), +from which it follows that +Ψδ +R(s, 0) = 1 +h1 +� +−g(s) + O(h2) +� +. +For the case that h1 ∈ (0, ∞] as δ → 0, from Taylor expansion on Ψδ, we obtain +∥Ψδ +R(s, 0)∥L∞(R2) ≤ 2 +R∥Ψδ∥L∞(R2×(0,h)) + R∥Ψδ +RR∥L∞(R2×(0,h)) ≤ 3∥g∥C2(R2) +R +. +As for (4.4), the maximum principle also applies to Φδ +R(s, 0). For the case h1 → 0 as δ → 0, we get +∥Φδ +RRRR∥L∞(R2×(0,h1)) = ∥ �∆δ +Γ(�∆δ +ΓΦδ)∥L∞(R2×(0,h1)) = ∥ �∆δ +Γ(�∆δ +Γg)∥L∞(R2), +where �∆δ +ΓΦδ := Φδ +s1s1 + µ2 +µ1 Φδ +s2s2. Since Φδ +RRR(s, h1) = − �∆δ +ΓΦδ +R(s, h1) = 0, for all s ∈ R2, as a result, we +derive +∥Φδ +RRR∥L∞(R2×(0,h1)) ≤ h1∥Φδ +RRRR∥L∞(R2×(0,h1)) ≤ h1∥ �∆δ +Γ(�∆δ +Γg)∥L∞(R2). +Combining this with the boundary condition Φδ +RR(s, 0) = − �∆δ +ΓΦδ(s, 0) = − �∆δ +Γg(s), we arrive at +���Φδ +R(s, 0) − h1 �∆δ +Γg(s) +��� = +����� +� h1 +0 +�∆δ +Γg(s) + Φδ +RR(s, R)dR +����� = O(h3 +1), +which results from +∥Φδ +RR + �∆δ +Γg∥L∞(R2×(0,h1)) ≤ h1∥Φδ +RRR∥L∞(R2×(0,h1)) = O(h2 +1). +18 + +For the case that h1 → (0, ∞] as δ → 0, we have ∥Φδ +R(s, 0)∥L∞(R2) = O(1). We summarize as +√σµ1∥Ψδ +R(s, 0)∥L∞(R2) = + + + +σ +δ +� +−g(s) + O(h2) +� +, +if h1 → 0 as δ → 0, +O(√σµ1), +if h1 ∈ (0, ∞] as δ → 0; +(4.5) +√σµ1∥Φδ +R(s, 0)∥L∞(R2) = + + + +µ1δ +� +�∆δ +Γg(s) + O(h2 +1) +� +, +if h1 → 0 as δ → 0, +O(√σµ1), +if h1 ∈ (0, ∞] as δ → 0. +(4.6) +Before diving further, we are intended to consider the limiting equation as δ → 0. 
In the case of +c ∈ (0, 1], if h1 → H ∈ (0, ∞] as δ → 0, then (4.3) and (4.4) give +� ΨRR + Ψs1s1 + cΨs2s2 = 0, +R2 × (0, H), +Ψ(s, 0) = g(s), +Ψ(s, H) = 0; +� ΦRR + Φs1s1 + cΦs2s2 = 0, +R2 × (0, H), +Φ(s, 0) = g(s), +ΦR(s, H) = 0. +(4.7) +It is easy to see that each of them has a unique bounded solution. We define +� +KH +D[g], KH +N[g] +� +(s) := +(ΨR(s, 0), ΦR(s, 0)) . Furthermore, for H ∈ (0, ∞), their analytic formulas are given by +KH +D[g](s) = − +∞ +� +n=1 +� +�λn�en(s)�gn(−1 + +1 +tanh( +� +�λnH) +) − +∞ +� +n=1 +� +�λn�en(s)�gn, +(4.8) +KH +N [g](s) = − +∞ +� +n=1 +� +�λn�en(s)�gn(−1 + tanh( +� +�λnH)) − +∞ +� +n=1 +� +�λn�en(s)�gn, +(4.9) +where �λn and �en(s) are the eigenvalues and the corresponding eigenfunctions of − �∆Γ := +∂2 +∂s2 +1 + c ∂2 +∂s2 +2 +defined on Γ with �gn =< �en, g >:= +� +Γ �engds. Moreover, K∞ +D [g](s) = K∞ +N [g](s) = −(− �∆Γ)1/2g(s). +On the other hand, in the case of c = 0, if h → H ∈ (0, ∞] as δ → 0, then it is easy to find that the +limits of (4.3) and (4.4) are degenerate in the following form +� +Ψ0 +RR + Ψ0 +s1s1 = 0, +R2 × (0, H), +Ψ0(s, 0) = g(s), +Ψ0(s, H) = 0, +� +Φ0 +RR + Φ0 +s1s1 = 0, +R2 × (0, H), +Φ0(s, 0) = g(s), +Φ0 +R(s, H) = 0. +(4.10) +Moreover, we define +ΛH +D[g](s) := Ψ0 +R(s, 0), +ΛH +N[g](s) := Φ0 +R(s, 0). +The analytic formulas for ΛH +D[g](s) and ΛH +N[g](s) are given using separation of variables. It is straight- +forward to see that +ΛH +D[g](s) = −S(s2) +∞ +� +n=1 +4πn +l2 +1 +coth +�2πnH +l1 +� � l1 +0 +g(z)cos +�2πn(z − s) +l1 +� +dz, +(4.11) +ΛH +N[g](s) = −S(s2) +∞ +� +n=1 +4πn +l2 +1 +tanh +�2πnH +l1 +� � l1 +0 +g(z)cos +�2πn(z − s) +l1 +� +dz, +(4.12) +where S(s2) is a periodic function in s2 ∈ [0, l2). +From now on, we discuss the existence and uniqueness of the solution of (4.10), which is of interest +in its own right. We refer to the book [17] and the references therein. +From the maximum principle, it turns out that Ψδ and Φδ are uniformly bounded and equicontinuous +on the compact subsets of R2 × (0, H). Consequently, the Arzela-Ascoli compact theorem ensures that +Ψδ → Ψ0, +Φδ → Φ0 +19 + +locally uniformly in R2 × (0, H) after passing to a subsequence of δ → 0; moreover, the limiting functions +Ψ0 ∈ C +� +R2 × (0, H) +� +and +Φ0 ∈ C +� +R2 × (0, H) +� +. +Our next task is to establish the uniqueness of Ψ0 and Φ0. Following this goal, let Ψ0 +1 and Ψ0 +2 be two +solutions of the former equation in (4.10); let Φ0 +1 and Φ0 +2 be two solutions of the latter equation in (4.10). +Without loss of generality, consider Ψ0 = Ψ0 +1 − Ψ0 +2 and Φ0 = Φ0 +1 − Φ0 +2, satisfying +� +Ψ0 +RR + Ψ0 +s1s1 = 0, +R2 × (0, H), +Ψ0(s, 0) = 0, +Ψ0(s, H) = 0; +� +Φ0 +RR + Φ0 +s1s1 = 0, +R2 × (0, H), +Φ0(s, 0) = 0, +Φ0 +R(s, H) = 0. +(4.13) +Suppressing the s2 variable, letting W(s1, R) := Ψ0(s1, s2, R) and V (s1, R) := Φ0(s1, s2, R), we have +� +WRR + Ws1s1 = 0, +R × (0, H), +W(s, 0) = 0, +W(s, H) = 0, +� +VRR + Vs1s1 = 0, +R × (0, H), +V (s, 0) = 0, +VR(s, H) = 0. +From the maximum principle, it turns out that W = V = 0. Thus, the assertion of uniqueness of Ψ0 and +Φ0 is completed. +4.3 +Proof of Theorem 4.1 +The goal of this subsection is to prove Theorem 4.1 and address EBCs on Γ × (0, T ). +The proof of Theorem 4.1. By Theorem 2.2, given any subsequence of δ, we can ensure that u → v weakly +in W 1,0 +2 +(Ω1 × (0, T )), and strongly in C +� +[0, T ]; L2(Ω1) +� +after passing to a further subsequence. We will +show that v is a weak solution of (3.1) with effective boundary conditions listed in Table 2 and 3. 
Because +of the uniqueness proved in Theorem 3.2, u → v without passing to any subsequence of δ > 0. +Our proof contains two steps: one is for the Dirichlet problem (1.1), and the other is for the Neumann +problem (1.2). +Step 1. Effective boundary conditions for the Dirichlet problem (1.1). +Let ξ ∈ C∞(Ω1 × [0, T ]) with ξ = 0 at t = T and extend ξ to the domain Ω × (0, T ) by defining +ξ(x, t) = +� +ξ(x, t), +x ∈ Ω1, +ψ(s(x), r(x), t), +x ∈ Ω2, +where ψ is the solution of the elliptic problem (4.1) and ξ ∈ W 1,1 +2,0 (QT ). +By the weak convergence of {u}δ>0, it follows from Definition 3.1 that as δ → 0, +L[u, ξ] −→L[v, ξ] = −lim +δ→0 +� T +0 +� +Ω2 +∇ψ · A∇udxdt. +(4.14) +In the curvilinear coordinates (s, r), the right-hand side of (4.14) gives +RHS := − +� T +0 +� +Ω2 +∇ψ · A∇udxdt = − +� T +0 +� δ +0 +� +Γ +(σψrur + ∇Γψ · A∇Γu) +− +� T +0 +� δ +0 +� +Γ +(σψrur + ∇Γψ · A∇Γu)(2Hr + κr2) +− +� T +0 +� δ +0 +� +Γ +(∇sψ · A∇su − ∇Γψ · A∇Γu)(1 + 2Hr + κr2) +=I + II + III. +(4.15) +Multiplying (4.3) by u and performing integration by parts, we obtain +I := +� δ +0 +� l2 +0 +� l1 +0 +(σψrur + µ1ψs1us1 + µ2ψs2us2) ds1ds2dr = − +� l2 +0 +� l1 +0 +σψr(s, 0, t)uds1ds2. (4.18) +20 + +Subsequently, it follows from (4.3) and (4.5) that +|II| = O(δ) +√ +T(σµ1)1/4(||Ψδ +R(s, 0)||L∞(R2))1/2, +(4.19) +where Lemma 2.1 and H¨older inequality were used. +By virtue of (3.4), (4) and (4.1), using Taylor expansion on gij(s, r), after a tedious calculation, we +get +|III| =O +� +δ +�µ1 +µ2 ++ δ2 µ1 +µ2 +� � T +0 +�� +Γ +� δ +0 +σψ2 +r + ∇Γψ · A∇Γψ +�1/2 +dt +=O +� +δ +�µ1 +µ2 ++ δ2 µ1 +µ2 +� √ +T(σµ1)1/4||ΨR(s, 0)||1/2 +L∞(Γ), +(4.20) +where we have used Lemma 2.1. +In the following, we consider cases: (1) σ +δ → 0, (2) σ +δ → α ∈ (0, ∞), (3) σ +δ → ∞. +Case 1. σ +δ → 0 as δ → 0. +Subcase (1i). σµ1 → 0 as δ → 0. In view of (4.14) - (4.20), by H¨older inequality, we have +|RHS| ≤C +� T +0 +�� δ +0 +� l2 +0 +� l1 +0 +� +σψ2 +r + µ1ψ2 +s1s1 + µ2ψ2 +s2s2 +� +�1/2 +dt ≤ C( +√ +T)max{σ +δ , √σµ1 }, +where (4.5) and Lemma 2.1 were used. +Thus, we have L[v, ξ] = 0, showing that v satisfies the boundary condition ∂v +∂n = 0 on Γ × (0, T ). +Subcase (1ii). √σµ1 → γ1 ∈ (0, ∞) as δ → 0. In this case, h1 → ∞. From (4.18), as δ → 0, if c ∈ (0, 1], +we then have +I = √σµ1 +� l2 +0 +� l1 +0 +Ψδ +R(s, 0)u → γ1 +� +Γ +vK∞ +D [ξ]. +Otherwise, if c = 0, then I → γ1 +� +Γ vΛ∞ +D [ξ] as δ → 0, where K∞ +D [ξ] and Λ∞ +D [ξ] are defined in (4.7) and +(4.10), respectively. +Because of the assumption that δ +� +µ1/µ2 → 0 as δ → 0, (4.19) and (4.20) give |II+III| → 0 as δ → 0. +Hence, for c ∈ (0, 1], we obtain +L[v, ξ] = γ1 +� T +0 +� +Γ +vK∞ +D [ξ], +which means that v satisfies k ∂v +∂n = γK∞ +D [v] on Γ × (0, T ); for c = 0, we have +L[v, ξ] = γ1 +� T +0 +� +Γ +vΛ∞ +D [ξ], +which means that v satisfies the boundary condition k ∂v +∂n = γ1Λ∞ +D [v] on Γ × (0, T ). +Subcase (1iii). σµ1 → ∞ as δ → 0. In this case, h1 → ∞ as δ → 0. Divided both sides of (4.14) by +√σµ1 and sending δ → 0, combining (4.18)-(4.20), we obtain +� T +0 +� +Γ +vK∞ +D [g] = 0, for c ∈ (0, 1] and +� T +0 +� +Γ +vΛ∞ +D [g] = 0, for c = 0. +(4.21) +In the case of c ∈ (0, 1], (4.21) yields ∇Γv = 0. By the similar proof in Subcase (1iii) from Step 1 in last +section, v satisfies the boundary condition +� +Γ +∂v +∂n = 0. +21 + +In the case of c = 0, it follows from (4.11) and (4.21) that vs1 = 0. From now on, choose the test +function ξ satisfying ξs1 = 0 on Γ. 
Let ψ be a constant in s1, and ψ = ψ(s2, r, t) is defined by +� σψrr + µ2ψs2s2 = 0, +R × (0, δ), +ψ(s2, 0, t) = g(s2), +ψ(s2, δ, t) = 0, +(4.22) +where g(s2) := ξ(s2, 0, t). The right-hand side of (4.14) now depends on the relationship of δ, σ and µ2. +Continuing what we have done in last section, let r = R +� +σ/µ2 and Ψ(s2, R) = ψ(s2, r, t). Substituting +these into (4.22) leads to +� ΨRR + Ψs2s2 = 0, +R × (0, h2), +Ψ(s2, 0) = g(s2), +Ψ(s2, h2) = 0, +(4.23) +where h2 = δ +� +µ2/σ. Moreover, define Dh +D[g] := ΨR(s2, 0). We estimate the size of ΨR(s2, 0) as in (4.5), +resulting in +√σµ2∥Ψδ +R(s2, 0)∥L∞(R) = + + + +σ +δ +� +−g(s2) + O(h2 +2) +� +, +if h2 → 0 as δ → 0, +O(√σµ2), +if h2 ∈ (0, ∞] as δ → 0. +(4.24) +If σµ2 → 0 as δ → 0, then by the similar argument in Subcase (1i), we get L[v, ξ] = 0, showing that +v satisfies the boundary condition +� +Γ1 +∂v +∂n = 0; if √σµ2 → γ2 ∈ (0, ∞) as δ → 0, then by the similar +argument in Subcase (1ii), we get +L[v, ξ] = γ2 +� T +0 +� +Γ +vD∞ +D [ξ], +showing that v satisfies +� +Γ1 +� +k ∂v +∂n − γ2D∞ +D [v] +� += 0; if σµ2 → ∞ as δ → 0, then by the similar argument +in Subcase (1iii) from Section 3.2, we have +� T +0 +� +Γ +vD∞ +D [g] = 0, +(4.25) +which indicates that vs2 = 0 on Γ × (0, T ). Thus, v is a constant on Γ in the spatial variable. Assume +further that ξ = m(t) on Γ and ψ(s, r, t) = (1 − r/δ) m(t). Using the same technique in (3.28), we get +L[v, ξ] = 0 from which v satisfies +� +Γ +∂v +∂n = 0 on Γ × (0, T ). +Case 2. σ +δ → α ∈ (0, ∞) as δ → 0. +Subcase (2i). σµ1 → 0. In this case, h → 0. A combination of (4.15) − (4.20) gives rise to +L[v, ξ] = −α +� T +0 +� +Γ +vξ, +from which v satisfies the boundary condition k ∂v +∂n = −αv on Γ × (0, T ). +Subcase (2ii). √σµ1 → γ1 ∈ (0, ∞). Like what we did in Subcase (1ii), as δ → 0, if c ∈ (0, 1], we have +L[v, ξ] = γ1 +� T +0 +� +Γ +vKγ1/α +D +[ξ], +resulting in the boundary condition k ∂v +∂n = γKγ1/α +D +[v]. On the other hand, if c = 0, we then have +L[v, ξ] = γ1 +� T +0 +� +Γ +vΛγ1/α +D +[ξ], +implying that v satisfies k ∂v +∂n = γ1Λγ1/α +D +[v] on Γ × (0, T ). +22 + +Subcase (2iii). σµ1 → ∞ as δ → 0. Following the proof of Subcase (1iii), we are led to +� T +0 +� +Γ +vK∞ +D [g] = 0 for c ∈ (0, 1], +� T +0 +� +Γ +vΛ∞ +D [g] = 0, for c = 0. +Therefore, if c ∈ (0, 1], then v satisfies the boundary condition +� +Γ +∂v +∂n = 0. On the other hand, if c = 0, +then vs1 = 0. By further taking ξ = ξ(s2, r, t) and ψ to be defined in (4.22), performing the procedure in +Subcase (1iii), we arrive at the following results: if σµ2 → 0 as δ → 0, then +� +Γ1 +∂v +∂n = 0 on Γ × (0, T ); if +√σµ2 → γ2 ∈ (0, ∞) as δ → 0, then +� +Γ1 +� +k ∂v +∂n − γ2D∞ +D [v] +� += 0; +if σµ2 → ∞ as δ → 0, then ∇Γv = 0 and +� +Γ +� +k ∂v +∂n + αv +� += 0 on Γ × (0, T ). +Case 3. σ +δ → ∞ as δ → 0. +Subcase (3i). √σµ1 → γ1 ∈ [0, ∞). In this case, h → 0. In view of (4.15)- (4.20), divided both sides +of (4.14) by σ/δ and sending δ → 0, we get +� T +0 +� +Γ vξ = 0, from which v satisfies the boundary condition +v = 0 on Γ × (0, T ). +Subcase (3ii). σµ1 → ∞ as δ → 0. For the case of c ∈ (0, 1], using the similar proof in Subcase (3ii) in +Section 3.2, we have v = 0. +On the other hand, for the case of c = 0, and h → H ∈ [0, ∞] as δ → 0. In view of (4.14)-(4.20) and +(4.10), if H = 0, then v satisfies the boundary condition v = 0. Otherwise, if H ∈ (0, ∞], we obtain +� T +0 +� +Γ +vΛH +D[ξ] = 0, +showing that vs1 = 0. 
Again, by taking ξ = ξ(s2, r, t) and ψ defined in (4.22), performing the procedure +in Subcase (1iii), we have v = 0 on Γ × (0, T ). +Step 2. Effective boundary conditions for the Neumann problem (1.2). +Let ξ ∈ C∞(Ω1 × [0, T ]) with ξ = 0 at t = T . We extend ξ to the domain Ω × (0, T ) by defining +ξ(x, t) = +� +ξ(x, t), +x ∈ Ω1, +φ(s(x), r(x), t), +x ∈ Ω2, +where φ is the unique solution of (4.2) and ξ ∈ W 1,1 +2 +(QT ). +Due to the weak convergence of u → v as δ → 0, it follows from Definition 2.1 that +L[u, ξ] = − +� T +0 +� +Ω2 +∇φ · A∇udxdt → L[v, ξ] = −lim +δ→0 +� T +0 +� +Ω2 +∇φ · A∇udxdt. +(4.26) +In the curvilinear coordinates, rewrite the right-hand side of (4.26) as +RHS := − +� T +0 +� +Ω2 +∇φ · A∇udxdt = I + II + III, +where +I := − +� T +0 +� δ +0 +� +Γ +(σφrur + ∇Γφ · A∇u) dsdrdt = − +� T +0 +� δ +0 +� l2 +0 +� l1 +0 +σφr(s, 0, t)uds1ds2dt, (4.27) +|II| =O(δ) +� T +0 +�� +Γ +� δ +0 +σφ2 +r + ∇Γφ · A∇Γφ +�1/2 +dt = O(δ) +√ +T(σµ1)1/4(||Φδ +R(s, 0)||L∞(Γ))1/2, (4.28) +23 + +and +|III| =O +� +δ +�µ1 +µ2 ++ δ2 µ1 +µ2 +� √ +T(σµ1)1/4||Φδ +R(s, 0)||1/2 +L∞(Γ). +(4.29) +Next, we consider the following cases: (1)σµ1 → 0, (2)√σµ1 → γ ∈ (0, ∞), (3)σµ1 → ∞. +Case 1. σµ1 → 0 as δ → 0. Using H¨older inequality and (4.2), we get +|RHS| =O(1) +� T +0 +�� δ +0 +� +Γ +σφ2 +r + ∇Γφ · A∇Γφ +�1/2 �� +Ω +∇u · A∇udx +�1/2 +dt = O( +√ +T )(σµ1)1/4, +where (4.6) and Lemma 2.1 were used. Thus, we have L[v, ξ] = 0, showing that v satisfies the boundary +condition ∂v +∂n = 0 on Γ × (0, T ). +Case 2. √σµ1 → γ1 ∈ (0, ∞) as δ → 0. +Subcase (2i). µ1δ → 0 as δ → 0. In this case, h = µ1δ/√σµ1 → 0. Thanks to (4.28)- (4.29), we obtain +I → 0 +and +|II + III| → 0, +where (4.6) was used. From this, it turns out that L[v, ξ] = 0, showing that v satisfies the effective +boundary condition ∂v +∂n = 0 on Γ × (0, T ). +Subcase (2ii). µ1δ → β1 ∈ (0, ∞] as δ → 0. In this case, h → β1/γ1 ∈ (0, ∞]. In view of (4.28)-(4.29) +and (4.6), we are led to +L[v, ξ] = γ1 +� T +0 +� +Γ +vKβ1/γ1 +N +[ξ] for c ∈ (0, 1], +L[v, ξ] = γ1 +� T +0 +� +Γ +vΛβ1/γ1 +N +[ξ] for c = 0, +from which v satisfies the boundary condition on Γ × (0, T ): k ∂v +∂n = γ1Kβ1/γ1 +N +[v] for c ∈ (0, 1] and +k ∂v +∂n = γ1Λβ1/γ1 +N +[v] for c = 0. +Case 3. σµ1 → ∞ as δ → 0. +Subcase (3i). µ1δ → β1 ∈ [0, ∞). In this case, h1 → β1/γ1 ∈ [0, ∞). Combining (4.28)-(4.29) and (4.6), +we have +I → β1 +� T +0 +� +Γ +�∆Γξ(s, 0, t)v(s, 0, t), +|II + III| → 0, +as δ → 0. Thus, for c ∈ (0, 1], we arrive at +L[v, ξ] = β1 +� T +0 +� +Γ +v �∆Γξ, +from which v satisfies the effective boundary condition k ∂v +∂n = β1 �∆Γv. For c = 0, we have +L[v, ξ] = β1 +� T +0 +� +Γ +v ∂2ξ +∂τττ 2 +1 +, +from which v satisfies the boundary condition k ∂v +∂n = β1 ∂2v +∂τττ 2 +1 . +Subcase (3ii). µ1δ → ∞ as δ → 0. In this case, h1 → H ∈ [0, H] after passing to a subsequence. We first +consider the case of c ∈ (0, 1]. If H = 0, we have +� T +0 +� +Γ +�∆Γξ(p, 0, t)vdsdt = 0, +24 + +leading to v(·) = m(t) on Γ for almost t ∈ (0, T ). If H ∈ (0, ∞], we have +� T +0 +� +Γ vKH +N [ξ] = 0, implying +that v(·) = m(t) on Γ for almost t ∈ (0, T ). Then, we choose a special test function ξ = ξ(t) on Γ and +a constant extension in Ω2 such that L[v, ξ] = 0, which shows that v satisfies the boundary condition +� T +0 +∂v +∂n = 0. +For the case of c = 0, if H = 0, we have +� T +0 +� +Γ v ∂2ξ +∂τττ 2 +1 dsdt = 0, implying that v(·) = v(s2, t) on Γ for +almost t ∈ (0, T ). If H ∈ (0, ∞], we have +� T +0 +� +Γ vKH +N [ξ] = 0, implying that v(·) = v(s2, t) on Γ for almost +t ∈ (0, T ). 
+We start with the proof by taking a test function ξ satisfying ξs1 = 0 on Γ. +Furthermore, let +φ = φ(s2, r, t) be defined by +� σφrr + µ2φs2s2 = 0, +R × (0, δ), +φ(s2, 0, t) = g(s2), +φr(s2, δ, t) = 0, +(4.30) +where g(s2) := ξ(p(s2), 0, t). Let r = R +� +σ/µ2 and Φ(s2, R) = φ(s2, r, t). Substituting these into (4.30) +gives +� +ΦRR + Φs2s2 = 0, +R × (0, h2), +Φ(s2, 0) = g(s2), +ΦR(s2, h2) = 0, +(4.31) +where h2 = δ +� +µ2/σ. Moreover, define Dh2 +N [g] := ΦR(s2, 0). +To estimate the size of ΦR(s2, 0) as in (4.6), we have +√σµ2∥Ψδ +R(s, 0)∥L∞(Γ) = +�� +µ2δ +� +ξs2s2(s, 0, t) + O(h2 +2) +� +, +if h2 → 0 as δ → 0, +O(√σµ2), +if h2 ∈ (0, ∞] as δ → 0. +(4.32) +From now on, given φ = φ(s2, r, t) in Ω2, the following focus on the relationships of δ, σ and µ2. We now +consider cases (a)σµ2 → 0, (b)√σµ2 → γ2 ∈ (0, ∞), (c)σµ2 → ∞. +Subcase (3iia). σµ2 → 0 as δ → 0. Like in Case 1, we have L[v, ξ] = 0, showing that v satisfies the +boundary condition +� +Γ1 +∂v +∂n = 0 on Γ × (0, T ). +Subcase (3iib). √σµ2 → γ2 ∈ (0, ∞) as δ → 0. Assume further that µ2δ → 0. In this case, h → 0 as +δ → 0. By the similar proof as in Case 2, we have L[v, ξ] = 0. So, v satisfies the effective boundary +condition +� +Γ1 +∂v +∂n = 0 on Γ × (0, T ). +On the other hand, if µ2δ → γ2 ∈ (0, ∞], then by (4.28)-(4.29) and (4.6), we are led to +L[v, ξ] = γ1 +� T +0 +� +Γ +vDβ2/γ2 +N +[ξ], +from which v satisfies the boundary condition +� +Γ1 +� +k ∂v +∂n − γ1Kβ2/γ2 +N +[v] +� += 0 on Γ × (0, T ). +Subcase (3iic). σµ2 → ∞ as δ → 0. Assume further that µ2δ → β2 ∈ [0, ∞). In this case, h2 → 0. By +virtue of (4.28)-(4.29) and (4.6), we get +L[v, ξ] = β2 +� T +0 +� +Γ +v ∂2ξ +∂τττ 2 +2 +, +from which v satisfies +� +Γ1 +� +k ∂v +∂n − β2 ∂2v +∂τττ 2 +2 +� += 0 on Γ × (0, T ). +If µ2δ → ∞, then in this case, h2 → H ∈ [0, ∞] after passing to a subsequence. If H = 0, then +divided both sides of the equation (4.26) by µ2δ and sending δ → 0, we obtain � T +0 +� +Γ v ∂2ξ +∂τττ 2 +2 = 0, implying +that v(·) = m(t) on Γ for almost t ∈ (0, T ). +If H ∈ (0, ∞], then divided both sides of (4.26) by √σµ2 and sending δ → 0, we obtain +� T +0 +� +Γ vDH +N [ξ] = +0, implying that v(·) = m(t) on Γ for almost t ∈ (0, T ). +Therefore, by taking a special test function ξ = ξ(t) on Γ and using a constant extension ξ = ξ(t) in +Ω2, we obtain L[v, ξ] = 0, implying that v the boundary condition +� +Γ +∂v +∂n = 0 on Γ × (0, T ). +This completes the whole proof. +25 + +Acknowledgments +The author is indebted to his advisor Professor Xuefeng Wang for his guidance and Dr. Yantao Wang +for his helpful discussions. +References +[1] H.Brezis, L.A. Caffarelli and A. Friedman Reinforcement problems for elliptic equations and varia- +tional inequalities, Ann. Mat. Pura Appl., 123 (1980), 219–246. +[2] G. Buttazzo and R. V. Kohn, Reinforcement by a thin layer with oscillating thickness, Appl. Math. +Optim., 16 (1987), 247–261. +[3] H. Carslaw and J. Jaeger, Conduction of heat in solids, Reprint of the second edition, New York, +1988. +[4] X. Chen, C. Pond and X. Wang, Effective boundary conditions resulting from anisotropic and opti- +mally aligned coatings: the two dimensional case, Arch. Ration. Mech. Anal., 206 (2012), 911–951. +[5] X. Geng, Effective Boundary Conditions Arising from the Heat Equation with Three-dimensional +Interior Inclusion, preprint. +[6] D. Gilbarg and N. Trudinger, Elliptic partial differential equations of second order, Reprint of the +1998 edition, Springer-Verlag, Berlin, 2001. +[7] +H. Li, J. Li and X. 
Wang, Error estimates and lifespan of effective boundary conditions for 2-dimensional optimally aligned coatings, J. Differential Equations, 303 (2022), 1–41.

[8] H. Li and X. Wang, Using effective boundary conditions to model fast diffusion on a road in a large field, Nonlinearity, 30 (2017), 3853–3894.

[9] H. Li and X. Wang, Effective boundary conditions for the heat equation with interior inclusion, Commun. Math. Res., 36 (2020), 272–295.

[10] J. Li, Asymptotic behavior of solutions to elliptic equations in a coated body, Comm. Pure Appl. Anal., 8 (2009), 1251–1267.

[11] J. Li, S. Rosencrans, X. Wang and K. Zhang, Asymptotic analysis of a Dirichlet problem for the heat equation on a coated body, Proc. Amer. Math. Soc., 137 (2009), 1711–1721.

[12] J. Li, L. Su, X. Wang and Y. Wang, Bulk-surface coupling: derivation of two models, J. Differential Equations, 289 (2021), 1–34.

[13] J. Li, X. Wang, G. Zhang and K. Zhang, Asymptotic behavior of Robin problem for heat equation on a coated body, Rocky Mountain J. Math., 42 (2012), 937–958.

[14] J. Li and K. Zhang, Reinforcement of the Poisson equation by a thin layer, Math. Models Methods Appl. Sci., 21 (2011), 1153–1192.

[15] Y. Y. Li and M. Vogelius, Gradient estimates for solutions to divergence form elliptic equations with discontinuous coefficients, Arch. Ration. Mech. Anal., 153 (2000), 91–151.

[16] J. L. Lions and E. Magenes, Non-homogeneous boundary value problems and applications, Springer-Verlag, New York, 1973.

[17] O. A. Oleinik and E. V. Radkevic, Second-order equations with nonnegative characteristic form, Springer-Verlag, New York, 1973.

[18] S. Rosencrans and X. Wang, Suppression of the Dirichlet eigenvalues of a coated body, SIAM J. Appl. Math., 66 (2006), 1895–1916; Corrigendum, SIAM J. Appl. Math., 68 (2008), 1202.

[19] E. Sanchez-Palencia, Problèmes de perturbations liés aux phénomènes de conduction à travers des couches minces de grande résistivité (French), J. Math. Pures Appl., 53 (1974), 251–269.

[20] X. Wang, Effective boundary conditions of diffusion equations on domains containing thin layers (in Chinese), Sci. Sin. Math., 46 (2016), 709–724.

[21] J. Wloka, Partial differential equations, Cambridge University Press, Cambridge, 1987.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' China b Department of Mathematics, National University of Singapore, Singapore Abstract We discuss the initial boundary value problem for a heat equation in a domain surrounded by a layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' The main features of this problem are twofold: on one hand, the layer is thin compared to the scale of the domain, and on the other hand, the thermal conductivity of the layer is drastically different from that of the bulk;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' moreover, the bulk is isotropic, but the layer is anisotropic and “optimally aligned” in the sense that any vector in the layer normal to the interface is an eigenvector of the thermal tensor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' We study the effects of the layer by thinking of it as a thickless surface, on which “effective boundary conditions” (EBCs) are imposed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' In the three-dimensional case, we obtain EBCs by investigating the limiting solution of the initial boundary value problem subject to either Dirichlet or Neumann boundary conditions as the thickness of the layer shrinks to zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' These EBCs contain not only the standard boundary conditions but also some nonlocal ones, including the Dirichlet-to-Neumann mapping and the fractional Laplacian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' One of the main features of this work is to allow the drastic difference in the thermal conductivity in the normal direction and two tangential directions within the layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Keywords.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' heat equation, thin layer, energy estimates, asymptotic behavior, effective boundary condi- tions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' AMS subject classifications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' 35K05, 35B40, 35B45,74K35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' 1 Introduction This paper is concerned with the scenario of insulating an isotropic conducting body with a coating whose thermal conductivity is anisotropic and drastically different from that of the body.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Moreover, the coating is thin compared to the scale of the body, resulting in multi-scales in the spatial variable.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' The difference in thermal conductivity and spatial size leads to computational difficulty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Some examples of this type of situation include cells with their membranes and thermal barrier coatings (TBCs) for turbine engine blades (see Figure 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' To handle such situations, we view the coating as a thickless surface as its thickness shrinks to zero, on which “effective boundary conditions” (EBCs) are imposed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' These EBCs not only provide an alternative way for numerical computation but also give us an analytic interpretation of the effects of the coating.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' The main purpose of this work is to find effective boundary conditions rigorously in a three-dimensional domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' In the article of Chen, Pond, and Wang [4], EBCs were studied in the two-dimensional case when the coating is anisotropic and “optimally aligned”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' However, it is not straightforward to extend their results in three dimensions because a degenerate equation that never happens in two dimensions arises.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' E-mail addresses: gengxingri@u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content='nus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' 1 This paper treats the case when the domain is three-dimensional, and the coating is “optimally aligned” with two tangent diffusion rates that may be different, which has not been covered by the previous results yet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Ω2 ∂Ω Ω1 Figure 1: Ω = Ω1 ∪ Ω2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Γ p δ n To be more specific, we introduce our mathematical model as follows: let the body Ω1 be surrounded by the coating Ω2 with uniform thickness δ > 0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' let the domain Ω = Ω1 ∪ Ω2 ⊂ R3 as shown in Figure 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' For any finite T > 0, consider the initial boundary value problem with the Dirichlet boundary condition \uf8f1 \uf8f2 \uf8f3 ut − ∇ · (A(x)∇u) = f(x, t), (x, t) ∈ QT , u = 0, (x, t) ∈ ST , u = u0, (x, t) ∈ Ω × {0}, (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content='1) where QT := Ω × (0, T ) and ST := ∂Ω × (0, T ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Suppose that u0 ∈ L2(Ω), f ∈ L2(QT ), and A(x) is the thermal conductivity given by A(x) = � kI3×3, x ∈ Ω1, (aij(x))3×3 , x ∈ Ω2, where k is a positive constant independent of δ > 0, and the positive-definite matrix (aij(x)) is anisotropic and “optimally aligned” in the coating Ω2, which means that any vector inside the coating normal to the interface is always an eigenvector of A(x)− see (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content='3) below for the precise definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Moreover, we also consider the initial value problem with the Neumann boundary condition \uf8f1 \uf8f2 \uf8f3 ut − ∇ · (A(x)∇u) = f(x, t), (x, t) ∈ QT , ∂u ∂nA = 0, (x, t) ∈ ST , u = u0, (x, t) ∈ Ω × {0}, (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content='2) where nA is the co-normal vector A(x)n, with n being the unit outer normal vector field on Γ(= ∂Ω1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' In this case, the Neumann boundary condition is the same as ∂u ∂n = 0 since the coating is “optimally aligned” − see below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' In the three-dimensional case, since the thermal tensor A(x) is positive-definite, it has three orthogonal eigenvectors and corresponding eigenvalues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Every eigenvalue measures the thermal conductivity of the coating in the corresponding direction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' By saying the coating Ω2 is optimally aligned, we mean that A(x)n(p) = σn(p), ∀x ∈ Ω2, (1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content='3) where Ω2 is thin enough and Γ is smooth enough such that the projection p of x onto Γ is unique, and n(p) is the unit outer normal vector of Γ at p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' This concept was first introduced by Rosencrans and Wang [18] in 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/LNFRT4oBgHgl3EQf1jiU/content/2301.13657v1.pdf'} +page_content=' Because of the optimally aligned coatings, A(x) must have two eigenvectors in the tangent directions.' 
If A(x) has two identical eigenvalues in the tangent directions, then within the coating Ω2 we assume that the thermal tensor A(x) satisfies the

$$
\text{Type I condition:}\quad A(x)\,s(p) = \mu\, s(p), \qquad \forall\, x\in\Omega_2, \tag{1.4}
$$

where s(p) is an arbitrary unit tangent vector of Γ at p; σ and µ are called the normal conductivity and the tangent conductivity, respectively. If A(x) has two different eigenvalues µ1 and µ2 in the tangent directions, then two tangent directions are fixed on Γ. According to the Hairy Ball Theorem in algebraic topology, there is no nonvanishing continuous tangent vector field on even-dimensional n-spheres. Therefore, in this paper we take Γ to be a topological torus, that is, any topological space homeomorphic to a torus. Within the coating Ω2, we assume that the thermal tensor A(x) satisfies the

$$
\text{Type II condition:}\quad A(x)\,\tau_1(p) = \mu_1\,\tau_1(p), \qquad A(x)\,\tau_2(p) = \mu_2\,\tau_2(p), \tag{1.5}
$$

where τ1(p) and τ2(p) are two orthonormal eigenvectors of A(x) in the tangent plane of Γ at p, and µ1 and µ2 are two different tangent conductivities in the corresponding tangent directions. Throughout this article, Ω1 is fixed and bounded with C² smooth boundary Γ; the coating Ω2 is uniformly thick, with ∂Ω approaching Γ as δ → 0; and σ, µ, µ1 and µ2 are positive functions of δ. There have been rich, deep, and interesting results on the idea of using EBCs in the literature. The idea dates back to the classic book of Carslaw and Jaeger [3], where EBCs were first recorded.
Subsequently, Sanchez-Palencia [19] first investigated the "interior reinforcement problem" for elliptic and parabolic equations in the particular case when the reinforcing material is lens-shaped. Following this line of thought, Brezis, Caffarelli, and Friedman [1] rigorously studied the elliptic problem for both interior and boundary reinforcement. See Li and Zhang [10, 14] for further developments. For the case of a rapidly oscillating thickness of the coating, see [2]. Later on, many follow-up works on EBCs for general coatings and "optimally aligned" coatings emerged (see [4, 5, 7-9, 11-14]). Furthermore, there is also a review paper [20] that provides a thorough investigation of this topic.

The layout of this paper is as follows. Section 2 is devoted to establishing some basic energy estimates and a compactness argument, showing that u converges to some v after passing to a subsequence of {u}_{δ>0} as δ → 0. In Section 3, we derive effective boundary conditions on Γ × (0, T) for the case of the Type I condition, in which two auxiliary functions are developed via harmonic extensions. In Section 4, based on two different harmonic extensions, we address effective boundary conditions on Γ × (0, T) for the case of the Type II condition.

2 Weak solutions

In this section, we begin with some a priori estimates, by which a compactness argument is established to study the asymptotic behavior of the weak solution of (1.1) or (1.2).
2.1 Preliminaries

Before going into energy estimates, we first introduce some important Sobolev spaces. Let W^{1,0}_2(Q_T) be the subspace of functions belonging to L²(Q_T) whose first-order weak derivatives in x also belong to L²(Q_T); W^{1,1}_2(Q_T) is defined similarly, with the first-order weak derivative in t also belonging to L²(Q_T); W^{1,0}_{2,0}(Q_T) is the closure in W^{1,0}_2(Q_T) of C^∞ functions vanishing near S_T, and W^{1,1}_{2,0}(Q_T) is defined similarly. Furthermore, denote V^{1,0}_{2,0}(Q_T) := W^{1,0}_{2,0}(Q_T) ∩ C([0, T]; L²(Ω)). Let us define one more Sobolev space on Q¹_T = Ω1 × (0, T): V^{1,0}_2(Q¹_T) = W^{1,0}_2(Q¹_T) ∩ C([0, T]; L²(Ω1)). We endow all these spaces with their natural norms. For simplicity, we write ∫_{Q_T} u(x,t) dxdt instead of ∫_0^T ∫_Ω u(x,t) dxdt.

Definition 2.1. A function u is said to be a weak solution of the Dirichlet problem (1.1) if u ∈ V^{1,0}_{2,0}(Q_T) and, for any ξ ∈ C^∞(Q_T) satisfying ξ = 0 at t = T and near S_T, it holds that

$$
A[u,\xi] := -\int_{\Omega} u_0\,\xi(x,0)\,dx + \int_{Q_T}\big(A(x)\nabla u\cdot\nabla\xi - u\,\xi_t - f\,\xi\big)\,dx\,dt = 0. \tag{2.1}
$$

The weak solution of the Neumann problem (1.2) is defined in the same way, except that u ∈ V^{1,0}_2(Q_T) and ξ ∈ C^∞(Q_T) satisfies ξ = 0 at t = T. Moreover, for any small δ > 0,
(1.1) or (1.2) admits a unique weak solution u ∈ W^{1,0}_2(Q_T) ∩ C([0, T]; L²(Ω)). As is well known, u satisfies the following "transmission conditions" in the weak sense:

$$
u_1 = u_2, \qquad k\,\nabla u_1\cdot n = \sigma\,\nabla u_2\cdot n \quad \text{on } \Gamma, \tag{2.2}
$$

where u_1 and u_2 are the restrictions of u to Ω1 × (0, T) and Ω2 × (0, T), respectively.

2.2 Basic energy estimates

In the sequel, for notational convenience, let C(T) denote a generic positive constant depending only on T, and let O(1) denote a quantity that may vary from line to line but is independent of δ. We provide the following energy estimates for the weak solution of (1.1) or (1.2).

Lemma 2.1. Suppose f ∈ L²(Q_T) and u0 ∈ L²(Ω). Then any weak solution u of (1.1) or (1.2) satisfies the following inequalities:

$$
\text{(i)}\ \ \max_{t\in[0,T]}\int_{\Omega} u^2(x,t)\,dx + \int_{Q_T}\nabla u\cdot A(x)\nabla u\,dx\,dt \ \le\ C(T)\Big(\int_{\Omega} u_0^2\,dx + \int_{Q_T} f^2\,dx\,dt\Big),
$$

$$
\text{(ii)}\ \ \max_{t\in[0,T]}\, t\int_{\Omega}\nabla u\cdot A(x)\nabla u\,dx + \int_{Q_T} t\,u_t^2\,dx\,dt \ \le\ C(T)\Big(\int_{\Omega} u_0^2\,dx + \int_{Q_T} f^2\,dx\,dt\Big).
$$

Proof.
Parts (i) and (ii) can be proved formally by a standard method: multiply the equation in (1.1) or (1.2) by u and by t u_t, respectively, and integrate by parts in both x and t over Ω × (0, T). By carrying out the same computation on the Galerkin approximation of u, this formal argument can be made rigorous; hence we omit the details.
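For the reader's convenience, here is the formal first step behind (i) (a sketch only; as noted above, the rigorous version runs through the Galerkin approximation). Testing the equation with u, the boundary terms vanish for either boundary condition, and Young's inequality gives

$$
\frac{1}{2}\frac{d}{dt}\int_{\Omega}u^2\,dx + \int_{\Omega}\nabla u\cdot A(x)\nabla u\,dx \;=\; \int_{\Omega} f\,u\,dx \;\le\; \frac12\int_{\Omega}f^2\,dx + \frac12\int_{\Omega}u^2\,dx,
$$

so Gronwall's inequality yields (i) after integrating in time; testing with t u_t instead leads to (ii).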
We prove our results using only H¹ a priori estimates; higher-order estimates are not needed for Theorems 3.1 and 4.1 here. We refer interested readers to [4, Theorem 5] for more general higher-order estimates for (1.1) and (1.2). For general coefficients A(x, t) = (a_{ij}(x, t))_{N×N}, let a_{ij}(x, t) satisfy

$$
\sum_{i,j} a_{ij}(x,t)\,\xi_i\xi_j \ \ge\ \lambda_0 |\xi|^2 \qquad \text{for any } \xi\in\mathbb{R}^N \text{ and some constant } \lambda_0 > 0.
$$

We also state the regularity of u near the interface Γ without rigorous proof.

Theorem 2.1. Let m be an integer with m ≥ 2 and a ∈ (0, 1). Suppose that Γ ∈ C^{m+a}, f ∈ C^{m-2+a,(m-2+a)/2}(Ω_h × [0, T]) (h = 1, 2), and a_{ij} ∈ C^{m-1+a,(m-1+a)/2}(Ω_h × [0, T]). Then for any t0 > 0, the weak solution u of (1.1) or (1.2) satisfies u ∈ C^{m+a,(m+a)/2}(N_h × [t0, T]), where N is a narrow neighborhood of Γ and N_h = N ∩ Ω_h.

Proof. The proof of the theorem can be found in [4], and hence we omit the details.

2.3 A compactness argument

We next turn to the compactness of the family of functions {u}_{δ>0}.

Theorem 2.2. Suppose that Γ ∈ C², u0 ∈ L²(Ω) and f ∈ L²(Q_T), with all these functions remaining unchanged as δ → 0. Then, after passing to a subsequence of δ → 0, the weak solution u of (1.1) or (1.2) converges to some v weakly in W^{1,0}_2(Ω1 × (0, T)) and strongly in C([0, T]; L²(Ω1)).

Proof of Theorem 2.2. According to Lemma 2.1, {u}_{δ>0} is bounded in W^{1,0}_2(Ω1 × (0, T)). For any small t0 ∈ (0, T], {u}_{δ>0} is also bounded in C([t0, T]; H¹(Ω1)). By the Banach-Eberlein theorem, u converges to some v weakly in C([t0, T]; H¹(Ω1)) after passing to a subsequence of δ → 0.
Together with the compactness of the embedding H¹(Ω1) ↪ L²(Ω1), for any fixed t0, {u}_{δ>0} is precompact in L²(Ω1). Furthermore, the functions {u}_{δ>0} : t ∈ [t0, T] ↦ u(·, t) ∈ L²(Ω1) are equicontinuous, because the term ∫_{Q_T} t u_t² dxdt is bounded by Lemma 2.1. Consequently, the generalized Arzela-Ascoli theorem implies that, after passing to a further subsequence of δ → 0, u → v strongly in C([t0, T]; L²(Ω1)).

In what follows, it suffices to prove that u → v strongly in C([0, T]; L²(Ω1)). To this end, we take a sequence u^n_0 ∈ C^∞_0(Ω1) such that ∥u0 − u^n_0∥_{L²(Ω)} ≤ 1/n + ∥u0∥_{L²(Ω2)}, where u^n_0 = 0 in Ω2 and ∥∇u^n_0∥_{L²(Ω)} ≤ C(n). Such u^n_0 can be constructed by multiplying u0 by cut-off functions in the outer normal direction of Γ, in such a way that the gradient of u^n_0 is independent of δ. Then we decompose u = u_1 + u_2, where u_1 and u_2 are, respectively, the unique weak solutions of the following problems:

$$
\begin{cases}
(u_1)_t - \nabla\cdot\big(A(x)\nabla u_1\big) = 0, & (x,t)\in Q_T,\\
u_1 = 0, & (x,t)\in S_T,\\
u_1 = u_0 - u_0^n, & (x,t)\in \Omega\times\{0\},
\end{cases}
\tag{2.3}
$$

$$
\begin{cases}
(u_2)_t - \nabla\cdot\big(A(x)\nabla u_2\big) = f(x,t), & (x,t)\in Q_T,\\
u_2 = 0, & (x,t)\in S_T,\\
u_2 = u_0^n, & (x,t)\in \Omega\times\{0\}.
\end{cases}
\tag{2.4}
$$

By a proof similar to that of Lemma 2.1, we have the energy estimate

$$
\|u_1(\cdot, t)\|_{L^2(\Omega)} \ \le\ \|u_0 - u_0^n\|_{L^2(\Omega)} \ \le\ \frac1n + \|u_0\|_{L^2(\Omega_2)}. \tag{2.5}
$$
Employing the energy estimates on (2.4), we get

$$
\int_0^t\int_{\Omega} (u_2)_t^2\,dx\,dt + \int_{\Omega}\nabla u_2(x,t)\cdot A(x)\nabla u_2(x,t)\,dx
\ \le\ \int_0^t\int_{\Omega} f^2\,dx\,dt + \int_{\Omega}\nabla u_0^n\cdot A(x)\nabla u_0^n\,dx
\ \le\ \int_0^t\int_{\Omega} f^2\,dx\,dt + k\int_{\Omega_1}|\nabla u_0^n|^2\,dx \ =:\ F(f,n). \tag{2.6}
$$

Combining this with (2.5), for any t ∈ [0, t0] we obtain

$$
\|u_2(\cdot,t) - u_0^n(\cdot)\|_{L^2(\Omega)}^2 = 2\int_0^t\int_{\Omega}\big(u_2(x,t)-u_0^n(x)\big)(u_2)_t\,dx\,dt
\le 2\Big(\int_0^t\int_{\Omega}\big(u_2(x,t)-u_0^n(x)\big)^2\Big)^{1/2}\Big(\int_0^t\int_{\Omega}(u_2)_t^2\Big)^{1/2}
\le 2\sqrt{t_0}\,\max_{t\in[0,t_0]}\|u_2(\cdot,t)-u_0^n(\cdot)\|_{L^2(\Omega)}\,\big(F(f,n)\big)^{1/2},
$$

from which it follows that

$$
\max_{t\in[0,t_0]}\|u_2(\cdot,t)-u_0^n(\cdot)\|_{L^2(\Omega)} \ \le\ 2\sqrt{t_0}\,\big(F(f,n)\big)^{1/2}. \tag{2.7}
$$

Finally, for t ∈ [0, t0], it follows from (2.5) and (2.7) that

$$
\|u(\cdot,t) - u_0(\cdot)\|_{L^2(\Omega_1)} \ \le\ \|u_1(\cdot,t)\|_{L^2(\Omega_1)} + \|u_2(\cdot,t)-u_0^n(\cdot)\|_{L^2(\Omega_1)} + \|u_0^n - u_0(\cdot)\|_{L^2(\Omega_1)}
\ \le\ \frac2n + 2\|u_0\|_{L^2(\Omega_2)} + 2\sqrt{t_0}\,\big(F(f,n)\big)^{1/2}.
$$

Because t0 and δ are small enough, ∥u(·, t) − u0(·)∥_{L²(Ω1)} can be made arbitrarily small for t ∈ [0, t0]. Using the fact that u → v strongly in C([t0, T]; L²(Ω1)), we conclude that u → v strongly in C([0, T]; L²(Ω1)) if we define v(·, 0) = u0.

3 EBCs for Type I condition

Throughout this section, we always assume the Type I condition (1.4). Under this condition, we aim to derive EBCs on Γ × (0, T) as the thickness of the layer shrinks to zero.
Theorem 3.1. Suppose that A(x) is given in (1.1) or (1.2) and satisfies (1.4). Let u0 ∈ L²(Ω) and f ∈ L²(Q_T), with both functions independent of δ. Assume further that σ and µ satisfy the scaling relationships

$$
\lim_{\delta\to 0}\sqrt{\sigma\mu} = \gamma\in[0,\infty], \qquad
\lim_{\delta\to 0}\frac{\sigma}{\delta} = \alpha\in[0,\infty], \qquad
\lim_{\delta\to 0}\mu\delta = \beta\in[0,\infty].
$$

Let u be the weak solution of (1.1) or (1.2). Then, as δ → 0, u → v weakly in W^{1,0}_2(Ω1 × (0, T)) and strongly in C([0, T]; L²(Ω1)), where v is the weak solution of

$$
\begin{cases}
v_t - k\Delta v = f(x,t), & (x,t)\in \Omega_1\times(0,T),\\
v = u_0, & (x,t)\in \Omega_1\times\{0\},
\end{cases}
\tag{3.1}
$$

subject to the effective boundary conditions on Γ × (0, T) listed in Table 1.

Table 1: Effective boundary conditions on Γ × (0, T).

EBCs on Γ × (0, T) for (1.1), as δ → 0:

|  | σ/δ → 0 | σ/δ → α ∈ (0, ∞) | σ/δ → ∞ |
| σµ → 0 | ∂v/∂n = 0 | k ∂v/∂n = −αv | v = 0 |
| √(σµ) → γ ∈ (0, ∞) | k ∂v/∂n = γ J^∞_D[v] | k ∂v/∂n = γ J^{γ/α}_D[v] | v = 0 |
| σµ → ∞ | ∇_Γv = 0, ∫_Γ ∂v/∂n ds = 0 | ∇_Γv = 0, ∫_Γ (k ∂v/∂n + αv) ds = 0 | v = 0 |

EBCs on Γ × (0, T) for (1.2), as δ → 0:

|  | µδ → 0 | µδ → β ∈ (0, ∞) | µδ → ∞ |
| σµ → 0 | ∂v/∂n = 0 | ∂v/∂n = 0 | ∂v/∂n = 0 |
| √(σµ) → γ ∈ (0, ∞) | ∂v/∂n = 0 | k ∂v/∂n = γ J^{β/γ}_N[v] | k ∂v/∂n = γ J^∞_N[v] |
| σµ → ∞ | ∂v/∂n = 0 | k ∂v/∂n = β ∆_Γ v | ∇_Γv = 0, ∫_Γ ∂v/∂n ds = 0 |

We now focus on the boundary conditions arising in Table 1.
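For orientation, a heuristic reading of one entry may be useful (an illustration only; the actual derivation is carried out in Subsection 3.3). In the Dirichlet case with σ/δ → α ∈ (0, ∞) and σµ → 0, tangential diffusion in the coating is negligible, and the flux through a layer of thickness δ with normal conductivity σ and outer value 0 is approximately σ(0 − v)/δ. Combined with the flux matching k ∂v/∂n = σ ∂u_2/∂n from (2.2), this suggests

$$
k\,\frac{\partial v}{\partial n} \;\approx\; -\frac{\sigma}{\delta}\,v \;\longrightarrow\; -\alpha\, v,
$$

which is exactly the Robin condition in the middle entry of the first table.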
The boundary condition ∇_Γv = 0 on Γ × (0, T) indicates that v is constant in the spatial variable (though it may depend on t), where ∇_Γ is the surface gradient on Γ. The operator ∆_Γ is the Laplace-Beltrami operator on Γ, and the boundary condition k ∂v/∂n = β∆_Γv can be understood as a second-order partial differential equation on Γ, revealing that the thermal flux across Γ in the outer normal direction causes heat accumulation that diffuses along Γ with diffusion rate β. The operators J^H_D and J^H_N appearing in Table 1 are linear and symmetric operators mapping the Dirichlet value to the Neumann value. More precisely, for H ∈ (0, ∞) and smooth g defined on Γ, we define J^H_D[g](s) := Θ_R(s, 0) and J^H_N[g](s) := Π_R(s, 0), where Θ and Π are, respectively, the bounded solutions of

$$
\begin{cases}
\Theta_{RR} + \Delta_\Gamma \Theta = 0, & \Gamma\times(0,H),\\
\Theta(s,0) = g(s),\quad \Theta(s,H) = 0,
\end{cases}
\qquad
\begin{cases}
\Pi_{RR} + \Delta_\Gamma \Pi = 0, & \Gamma\times(0,H),\\
\Pi(s,0) = g(s),\quad \Pi_R(s,H) = 0.
\end{cases}
$$

The analytic formulas for J^H_D[g] and J^H_N[g] are deferred to Subsection 3.2. We then define (J^∞_D[g], J^∞_N[g]) := lim_{H→∞} (J^H_D[g], J^H_N[g]), where J^∞_D[g] = J^∞_N[g] = −(−∆_Γ)^{1/2} g is the fractional Laplace-Beltrami operator applied to g.
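As a preview of those formulas (an illustrative single-mode computation; the general expansion is the content of Subsection 3.2), let (λ, e) be an eigenpair of −∆_Γ with λ > 0 and take g = g_λ e. Separation of variables reduces the two problems above to ODEs in R, whose bounded solutions are

$$
\Theta(s,R) = g_\lambda\,\frac{\sinh\big(\sqrt{\lambda}\,(H-R)\big)}{\sinh\big(\sqrt{\lambda}\,H\big)}\,e(s),
\qquad
\Pi(s,R) = g_\lambda\,\frac{\cosh\big(\sqrt{\lambda}\,(H-R)\big)}{\cosh\big(\sqrt{\lambda}\,H\big)}\,e(s),
$$

so that J^H_D[g] = −√λ coth(√λ H) g and J^H_N[g] = −√λ tanh(√λ H) g on this mode; letting H → ∞, both symbols tend to −√λ, which is the mode-wise action of −(−∆_Γ)^{1/2}.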
3.1 Definition, existence and uniqueness of weak solutions of effective models

We define weak solutions of (3.1) together with the boundary conditions in Table 1.

Definition 3.1. Let the test function ξ ∈ C^∞(Q¹_T) satisfy ξ = 0 at t = T.

(1) A function v is said to be a weak solution of (3.1) with the Dirichlet boundary condition v = 0 if v ∈ V^{1,0}_{2,0}(Q¹_T) and, for any test function ξ, v satisfies

$$
L[v,\xi] := -\int_{\Omega_1} u_0(x)\,\xi(x,0)\,dx + \int_0^T\int_{\Omega_1}\big(k\nabla v\cdot\nabla\xi - v\,\xi_t - f\,\xi\big)\,dx\,dt = 0. \tag{3.2}
$$

(2) A function v is said to be a weak solution of (3.1) with the boundary conditions ∇_Γv = 0 and ∫_Γ (k ∂v/∂n + αv) ds = 0 if, for almost every fixed t ∈ (0, T), the trace of v on Γ is a constant and, whenever ∇_Γξ = 0 on Γ, it holds that v ∈ V^{1,0}_2(Q¹_T) and v satisfies

$$
L[v,\xi] = -\int_0^T\int_{\Gamma} \alpha\, v\,\xi\,ds\,dt.
$$

(3) A function v is said to be a weak solution of (3.1) with the boundary condition k ∂v/∂n = B[v], where B[v] = −αv, or γ J^H_D[v], or γ J^H_N[v] for H ∈ (0, ∞], if v ∈ V^{1,0}_2(Q¹_T) and, for any test function ξ, v satisfies

$$
L[v,\xi] = \int_0^T\int_{\Gamma} v\,B[\xi]\,ds\,dt.
$$

(4) A function v is said to be a weak solution of (3.1) with the boundary condition k ∂v/∂n = β ∆_Γv if v ∈ V^{1,0}_2(Q¹_T) with its trace belonging to L²((0, T); H¹(Γ)) and, for any test function ξ, v satisfies

$$
L[v,\xi] = -\beta\int_0^T\int_{\Gamma} \nabla_\Gamma v\cdot\nabla_\Gamma \xi\,ds\,dt.
$$

A weak solution of (3.1) satisfies the initial condition in the sense that v(·, t) → u0(·) in L²(Ω1) as t → 0. Moreover, the existence and uniqueness of the weak solution of (3.1) with the boundary conditions in Table 1 are stated without proof in the following theorem.

Theorem 3.2. Suppose that Γ ∈ C¹, u0 ∈ L²(Ω1) and f ∈ L²(Q¹_T).
Then (3.1), with any boundary condition in Table 1, has one and only one weak solution as defined in Definition 3.1.

Proof. For a rigorous proof of the theorem, the reader is referred to [4] (see also [16] and [21]).

Before proceeding further, inspired by [12], we begin with a geometric preparation for the coating Ω2 by introducing curvilinear coordinates. We define a mapping F:

$$
\Gamma\times(0,\delta)\ \ni\ (p,r)\ \longmapsto\ x = F(p,r) = p + r\,n(p)\ \in\ \mathbb{R}^3,
$$

where p is the projection of x onto Γ, n(p) is the unit normal vector of Γ pointing out of Ω1 at p, and r is the distance from x to Γ. As is well known ([6], Lemma 14.16), for small δ > 0, F is a C¹ smooth diffeomorphism from Γ × (0, δ) to Ω2; r = r(x) is a C² smooth function of x and is seen as the inverse of the mapping x = F(p, r). Using local coordinates s = (s1, s2) in a typical chart on Γ, we then have p = p(s) = p(s1, s2), x = F(p(s), r) = F(s, r), and

$$
dx = (1 + 2Hr + \kappa r^2)\,ds\,dr \quad \text{in } \Omega_2, \tag{3.3}
$$

where ds represents the surface element, and H(s) and κ(s) are the mean curvature and the Gaussian curvature of Γ at p, respectively.
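As a quick check of (3.3) (an illustration only; the paper keeps Γ general), suppose Γ were a sphere of radius a, with the sign convention for H implicit in (3.3). Then H = 1/a and κ = 1/a², so

$$
1 + 2Hr + \kappa r^2 = \Big(1 + \frac{r}{a}\Big)^2,
$$

which is the familiar Jacobian of spherical shell coordinates at distance r outside the sphere.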
In the curvilinear coordinates, the Riemannian metric tensor at x ∈ Ω2 induced from R³ is defined as G(s, r), with elements g_{ij}(s, r) = g_{ji}(s, r) = ⟨F_i, F_j⟩_{R³}, i, j = 1, 2, 3, where F_i = F_{s_i} for i = 1, 2 and F_3 = F_r. Let |G| := det G, and let g^{ij}(s, r) be the elements of the inverse matrix of G, denoted by G^{−1}. In the curvilinear coordinates (s, r), the derivatives of u are given as follows:

$$
\nabla u = u_r\,n + \nabla_s u, \qquad
\nabla_s u = \sum_{i,j=1,2} g^{ij}(s,r)\,u_{s_j}\,F_{s_i}(s,r), \qquad
\nabla_\Gamma u = \sum_{i,j=1,2} g^{ij}(s,0)\,u_{s_j}\,p_{s_i}(s); \tag{3.4}
$$

$$
\nabla\cdot\big(A(x)\nabla u\big) = \frac{\sigma}{\sqrt{|G|}}\Big(\sqrt{|G|}\,u_r\Big)_r + \mu\,\Delta_s u, \qquad
\Delta_s u = \nabla_s\cdot\nabla_s u = \frac{1}{\sqrt{|G|}}\sum_{i,j=1,2}\Big(\sqrt{|G|}\,g^{ij}(s,r)\,u_{s_i}\Big)_{s_j}. \tag{3.5}
$$

Moreover, if A(x) satisfies the Type I condition (1.4), then in Ω2 we have

$$
A(x) = \sigma\,n(p)\otimes n(p) + \mu\sum_{i,j} g^{ij}(s,r)\,F_{s_i}(s,r)\otimes F_{s_j}(s,r). \tag{3.6}
$$

3.2 Auxiliary functions

Our goal in this subsection is to construct auxiliary functions and to estimate their asymptotic behavior when the thickness of the thin layer is sufficiently small. Our idea for developing the auxiliary functions is adapted from [4] via a harmonic extension. We construct two auxiliary functions for the Type I condition (1.4) by defining θ and π. For every t ∈ [0, T], let θ(s, r, t) and π(s, r, t) be the bounded solutions of

$$
\begin{cases}
\sigma\theta_{rr} + \mu\Delta_\Gamma\theta = 0, & \Gamma\times(0,\delta),\\
\theta(s,0,t) = g(s),\quad \theta(s,\delta,t) = 0,
\end{cases}
\qquad
\begin{cases}
\sigma\pi_{rr} + \mu\Delta_\Gamma\pi = 0, & \Gamma\times(0,\delta),\\
\pi(s,0,t) = g(s),\quad \pi_r(s,\delta,t) = 0,
\end{cases}
\tag{3.7}
$$

where g(s) = g(p(s)) = ξ(s, 0, t).
From the maximum principle, θ and π are unique. Multiplying the equations in (3.7) by θ and π, respectively, and integrating by parts over Γ × (0, δ), we arrive at

$$
\int_0^\delta\int_\Gamma \big(\sigma\theta_r^2 + \mu|\nabla_\Gamma\theta|^2\big) = -\int_\Gamma \sigma\,\theta_r(s,0,t)\,g(s),
\qquad
\int_0^\delta\int_\Gamma \big(\sigma\pi_r^2 + \mu|\nabla_\Gamma\pi|^2\big) = -\int_\Gamma \sigma\,\pi_r(s,0,t)\,g(s). \tag{3.8}
$$

Multiplying the equations in (3.7) by u and performing the integration by parts again, we get

$$
\int_0^\delta\int_\Gamma \big(\sigma\theta_r u_r + \mu\nabla_\Gamma\theta\cdot\nabla_\Gamma u\big) = -\int_\Gamma \sigma\,\theta_r(s,0,t)\,u(p(s),t),
\qquad
\int_0^\delta\int_\Gamma \big(\sigma\pi_r u_r + \mu\nabla_\Gamma\pi\cdot\nabla_\Gamma u\big) = -\int_\Gamma \sigma\,\pi_r(s,0,t)\,u(p(s),t). \tag{3.9}
$$

To eliminate σ and µ, we set r = R√(σ/µ) and substitute into (3.7). Suppressing the time dependence, this leads to Θ(s, R) = θ(s, R√(σ/µ), t) and Π(s, R) = π(s, R√(σ/µ), t). Consequently, (3.7) is equivalent to

$$
\begin{cases}
\Theta_{RR} + \Delta_\Gamma\Theta = 0, & \Gamma\times(0,h),\\
\Theta(s,0) = g(s),\quad \Theta(s,h) = 0,
\end{cases}
\qquad
\begin{cases}
\Pi_{RR} + \Delta_\Gamma\Pi = 0, & \Gamma\times(0,h),\\
\Pi(s,0) = g(s),\quad \Pi_R(s,h) = 0,
\end{cases}
\tag{3.10}
$$

where

$$
h := \delta\sqrt{\frac{\mu}{\sigma}} = \frac{\mu\delta}{\sqrt{\sigma\mu}} = \frac{\sqrt{\sigma\mu}}{\sigma/\delta}.
$$

We now define two Dirichlet-to-Neumann operators

$$
J^h_D[g](s) := \Theta_R(s,0) \qquad\text{and}\qquad J^h_N[g](s) := \Pi_R(s,0). \tag{3.11}
$$
Observe that

$$
\sigma\theta_r(s,0,t) = \sqrt{\sigma\mu}\,\Theta_R(s,0) = \sqrt{\sigma\mu}\,J^h_D[g](s),
\qquad
\sigma\pi_r(s,0,t) = \sqrt{\sigma\mu}\,\Pi_R(s,0) = \sqrt{\sigma\mu}\,J^h_N[g](s). \tag{3.12}
$$

Rigorous formulas for J^h_D[g] and J^h_N[g] are given in terms of the eigenvalues and eigenfunctions of −∆_Γ by separation of variables, from which it follows that

$$
\Theta(s,R) = \sum_{n=1}^{\infty} \frac{-g_n e^{-\sqrt{\lambda_n}h}}{2\sinh(\sqrt{\lambda_n}h)}\Big(e^{\sqrt{\lambda_n}R} - e^{\sqrt{\lambda_n}(2h-R)}\Big)e_n(s), \tag{3.13}
$$

$$
\Pi(s,R) = \sum_{n=1}^{\infty} \frac{g_n e^{-\sqrt{\lambda_n}h}}{2\cosh(\sqrt{\lambda_n}h)}\Big(e^{\sqrt{\lambda_n}R} + e^{\sqrt{\lambda_n}(2h-R)}\Big)e_n(s), \tag{3.14}
$$

where g_n := ⟨e_n, g⟩ = ∫_Γ e_n g ds, and λ_n and e_n(s) are the eigenvalues and the corresponding eigenfunctions of the Laplace-Beltrami operator −∆_Γ on Γ. Subsequently, it follows from (3.11) and (3.13) that

$$
J^h_D[g](s) = -\sum_{n=1}^{\infty} \frac{\sqrt{\lambda_n}\,e_n(s)\,g_n}{\tanh(\sqrt{\lambda_n}h)},
\qquad
J^h_N[g](s) = -\sum_{n=1}^{\infty} \sqrt{\lambda_n}\,e_n(s)\,g_n\,\tanh(\sqrt{\lambda_n}h). \tag{3.15}
$$

Furthermore, if h → H ∈ (0, ∞], we have

$$
\big|J^h_D[g](s) - J^H_D[g](s)\big| = \sum_{n=1}^{\infty} \sqrt{\lambda_n}\,e_n(s)\,g_n\Big(\frac{1}{\tanh(\sqrt{\lambda_n}H)} - \frac{1}{\tanh(\sqrt{\lambda_n}h)}\Big)
= |H-h|\sum_{n=1}^{\infty} \lambda_n\,e_n(s)\,g_n\,\frac{-4}{\big(e^{\sqrt{\lambda_n}h'} - e^{-\sqrt{\lambda_n}h'}\big)^2}
= O(|H-h|) \tag{3.16}
$$

for some h′ between h and H. This implies uniform convergence in h. By a similar analysis, if h → H ∈ (0, ∞], then J^h_N[g] converges uniformly to J^H_N[g], where J^∞_D[g] = J^∞_N[g] := −(−∆_Γ)^{1/2} g.
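The estimates that follow can be read off mode by mode from (3.15) (an illustrative expansion under the same notation): using coth x = 1/x + x/3 + O(x³) and tanh x = x + O(x³), one finds

$$
-\frac{\sqrt{\lambda_n}}{\tanh(\sqrt{\lambda_n}h)} = -\frac{1}{h} - \frac{\lambda_n h}{3} + O(h^3),
\qquad
-\sqrt{\lambda_n}\,\tanh(\sqrt{\lambda_n}h) = -\lambda_n h + O(h^3) \qquad (h\to 0),
$$

so that, summing over modes, Θ_R(s, 0) ≈ −g(s)/h and Π_R(s, 0) ≈ h ∆_Γ g(s) for small h, while both symbols tend to −√λ_n as h → ∞, consistently with J^∞_D = J^∞_N = −(−∆_Γ)^{1/2}.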
In what follows, we estimate the size of Θ_R(s, 0) and Π_R(s, 0) for sufficiently small δ. On the one hand, if h is small and h → 0 as δ → 0, then it follows from (3.15) that

$$
\Big|\Theta_R(s,0) + \frac{g(s)}{h}\Big| \le h\,\|g\|_{C^2(\Gamma)},
\qquad
\big|\Pi_R(s,0) - h\,\Delta_\Gamma g\big| \le O(h^3). \tag{3.17}
$$

Combining this with (3.12), we get

$$
\sqrt{\sigma\mu}\,\Theta_R(s,0) = \frac{\sigma}{\delta}\big(-g(s) + O(h^2)\big),
\qquad
\sqrt{\sigma\mu}\,\Pi_R(s,0) = \mu\delta\big(\Delta_\Gamma g(s) + O(h^2)\big). \tag{3.18}
$$

On the other hand, if h → H ∈ (0, ∞] as δ → 0, then from the Taylor expansion of Θ(s, R) we obtain

$$
\Theta_R(s,0) = \frac{\Theta(s,R) - \Theta(s,0)}{R} - \frac{R}{2}\,\Theta_{RR}(s,\bar R) \qquad \text{for some } \bar R\in[0,R].
$$

Taking R = min{h, 1}, from the maximum principle we have

$$
\|\Theta_R\|_{L^\infty(\Gamma)} \le \frac{2}{R}\,\|\Theta\|_{L^\infty(\Omega_2)} + R\,\|\Theta_{RR}\|_{L^\infty(\Omega_2)} \le \frac{3\,\|g\|_{C^2(\Gamma)}}{R},
$$

from which it follows that

$$
\sqrt{\sigma\mu}\,\|\Theta_R\|_{L^\infty(\Gamma)} = O(1)\,\frac{\sqrt{\sigma\mu}}{R}. \tag{3.19}
$$

By a similar analysis on Π_R, if h → H ∈ (0, ∞] as δ → 0, then we have

$$
\|\Pi_R\|_{L^\infty(\Gamma)} = O(1). \tag{3.20}
$$

We end this subsection by mentioning that, for H ∈ (0, ∞), J^H_D[g] and J^H_N[g] are defined for smooth g. However, it is easy to show that they are also well-defined for any given g ∈ H^{1/2}(Γ), where H^{1/2}(Γ) is defined as the completion of smooth functions under the H^{1/2}(Γ) norm. Moreover, J^H_D and J^H_N : H^{1/2}(Γ) → H^{−1/2}(Γ) are linear and bounded, where H^{−1/2}(Γ) is the dual space of H^{1/2}(Γ).

3.3 Proof of Theorem 3.1

The main result of this subsection is to prove Theorem 3.1, in which we derive EBCs on Γ × (0, T).

Proof of Theorem 3.1.
According to Theorem 2.2, the weak solution u of (1.1) or (1.2) converges to some v weakly in W^{1,0}_2(Ω1 × (0, T)) and strongly in C([0, T]; L²(Ω1)) after passing to a subsequence of δ > 0. Thus, given any subsequence of δ, we can ensure that u → v in all of the above spaces after passing to a further subsequence. In what follows, we show that v is a weak solution of (3.1) with the effective boundary conditions listed in Table 1. By what we have proved in Theorem 3.2, v is unique; the fact that u → v without passing to any subsequence of δ > 0 is a consequence of this uniqueness. To derive the EBCs on Γ × (0, T), we complete the proof in the following two steps: one for the Dirichlet problem (1.1), and the other for the Neumann problem (1.2).

Step 1. Effective boundary conditions for the Dirichlet problem (1.1). To begin, we assume that all conditions in Theorem 3.1 hold.
Let the test function $\xi \in C^\infty(\overline{\Omega}_1 \times [0,T])$ with $\xi = 0$ at $t = T$, and extend $\xi$ to the domain $\Omega \times [0,T]$ by defining
\[
\xi(x,t) =
\begin{cases}
\xi(x,t), & x \in \Omega_1,\\
\theta(p(x), r(x), t), & x \in \Omega_2,
\end{cases}
\]
where $\theta$ is introduced in (3.7). It is easy to check that $\xi \in W^{1,1}_{2,0}(Q_T)$, and $\xi$ is called the harmonic extension of $\xi$. Since $u$ is a weak solution of (1.1), it follows from Definition 2.1 that
\[
A[u,\xi] = -\int_\Omega u_0(x)\,\xi(x,0)\,dx + \int_0^T\!\!\int_\Omega \bigl(\nabla\xi \cdot A\nabla u - u\,\xi_t - f\xi\bigr)\,dx\,dt = 0. \qquad (3.21)
\]
Rewrite (3.21) as
\[
\int_0^T\!\!\int_{\Omega_1} k\,\nabla\xi\cdot\nabla u\,dx\,dt - \int_\Omega u_0(x)\,\xi(x,0)\,dx - \int_0^T\!\!\int_\Omega (u\,\xi_t + f\xi)\,dx\,dt
= -\int_0^T\!\!\int_{\Omega_2} \nabla\theta\cdot A\nabla u\,dx\,dt. \qquad (3.22)
\]
Since $u \to v$ weakly in $W^{1,0}_2(\Omega_1\times(0,T))$ and strongly in $C\bigl([0,T]; L^2(\Omega_1)\bigr)$ as $\delta \to 0$, we summarize
\[
\int_{Q_T} u\,\xi_t\,dx\,dt \to \int_{Q^1_T} v\,\xi_t\,dx\,dt, \qquad
\int_{Q^1_T} \nabla u\cdot\nabla\xi\,dx\,dt \to \int_{Q^1_T} \nabla v\cdot\nabla\xi\,dx\,dt, \qquad
\int_{Q_T} f\xi\,dx\,dt \to \int_{Q^1_T} f\xi\,dx\,dt,
\]
from which the left-hand side of (3.22) is equivalent to
\[
L[v,\xi] := \int_0^T\!\!\int_{\Omega_1} k\,\nabla\xi\cdot\nabla v\,dx\,dt - \int_{\Omega_1} u_0(x)\,\xi(x,0)\,dx - \int_0^T\!\!\int_{\Omega_1} (v\,\xi_t + f\xi)\,dx\,dt. \qquad (3.23)
\]
The remainder of this step focuses on the right-hand side of (3.22). Using the curvilinear coordinates $(s,r)$,
by virtue of (3.3), (3.4) and (3.6), we have
\[
\begin{aligned}
\mathrm{RHS} := -\int_0^T\!\!\int_{\Omega_2} \nabla\theta\cdot A\nabla u\,dx\,dt
&= -\int_0^T\!\!\int_\Gamma\!\int_0^\delta (\sigma\theta_r u_r + \mu\nabla_s\theta\cdot\nabla_s u)(1 + 2Hr + \kappa r^2)\,dr\,ds\,dt\\
&= -\int_0^T\!\!\int_\Gamma\!\int_0^\delta (\sigma\theta_r u_r + \mu\nabla_\Gamma\theta\cdot\nabla_\Gamma u)
 - \int_0^T\!\!\int_\Gamma\!\int_0^\delta (\sigma\theta_r u_r + \mu\nabla_\Gamma\theta\cdot\nabla_\Gamma u)(2Hr + \kappa r^2)\\
&\quad - \int_0^T\!\!\int_\Gamma\!\int_0^\delta \mu(\nabla_s\theta\cdot\nabla_s u - \nabla_\Gamma\theta\cdot\nabla_\Gamma u)(1 + 2Hr + \kappa r^2)
\;=:\; \mathrm{I} + \mathrm{II} + \mathrm{III}. \qquad (3.24)
\end{aligned}
\]
Due to (3.9) and (3.12), it holds that
\[
\mathrm{I} := \int_0^T \mathrm{I}(t)\,dt = \sqrt{\sigma\mu}\int_0^T\!\!\int_\Gamma u(p(s),t)\,\Theta_R(s,0)\,ds\,dt. \qquad (3.25)
\]
Subsequently, in view of (3.8) and (3.19), it follows from Lemma 2.1 that
\[
\begin{aligned}
|\mathrm{II}| &\le \int_0^T \left|\int_\Gamma\!\int_0^\delta (\sigma\theta_r u_r + \mu\nabla_\Gamma\theta\cdot\nabla_\Gamma u)(2Hr + \kappa r^2)\,dr\,ds\right| dt\\
&= O(\delta)\int_0^T \Bigl(\int_\Gamma\!\int_0^\delta \sigma\theta_r^2 + \mu|\nabla_\Gamma\theta|^2\Bigr)^{1/2}\Bigl(\int_\Omega \sigma u_r^2 + \mu|\nabla_\Gamma u|^2\Bigr)^{1/2} dt\\
&= O(\delta)\int_0^T \frac{1}{\sqrt{t}}\Bigl(\int_\Gamma \sigma|\theta_r(s,0,t)|\Bigr)^{1/2} dt
= O(\delta)\sqrt{T}\,(\sigma\mu)^{1/4}\,\|\Theta_R\|^{1/2}_{L^\infty(\Gamma)}, \qquad (3.26)
\end{aligned}
\]
where we have used the Hölder inequality. Consequently, using (3.4), (3.8) and (3.19), we have
\[
\begin{aligned}
|\mathrm{III}| &\le \left|\int_0^T\!\!\int_\Gamma\!\int_0^\delta \mu(\nabla_s\theta\cdot\nabla_s u - \nabla_\Gamma\theta\cdot\nabla_\Gamma u)(1 + 2Hr + \kappa r^2)\right|
= O(\delta)\int_0^T\!\!\int_\Gamma\!\int_0^\delta \mu\Bigl|\sum_{ij}\theta_{s_i} u_{s_j}\Bigr|\\
&= O(\delta)\int_0^T \Bigl(\int_\Gamma\!\int_0^\delta \sigma\theta_r^2 + \mu|\nabla_\Gamma\theta|^2\Bigr)^{1/2}\Bigl(\int_\Omega \sigma u_r^2 + \mu|\nabla_\Gamma u|^2\Bigr)^{1/2} dt
= O(\delta)\sqrt{T}\,(\sigma\mu)^{1/4}\,\|\Theta_R\|^{1/2}_{L^\infty(\Gamma)}, \qquad (3.27)
\end{aligned}
\]
where Lemma 2.1 and the Hölder inequality were used.
To investigate the asymptotic behavior of the right-hand side of (3.22) as $\delta \to 0$, we consider the following cases: (1) $\sigma/\delta \to 0$; (2) $\sigma/\delta \to \alpha \in (0,\infty)$; (3) $\sigma/\delta \to \infty$.

Case 1. $\sigma/\delta \to 0$ as $\delta \to 0$.

Subcase (1i). $\sigma\mu \to 0$ as $\delta \to 0$. In view of (3.4), (3.8), (3.12), (3.18) and (3.19), we have
\[
|\mathrm{RHS}| \le O(1)\int_0^T \Bigl(\int_\Gamma\!\int_0^\delta \bigl(\sigma\theta_r^2 + \mu|\nabla_s\theta|^2\bigr)\Bigr)^{1/2}\Bigl(\int_\Gamma\!\int_0^\delta \bigl(\sigma u_r^2 + \mu|\nabla_s u|^2\bigr)\Bigr)^{1/2} dt
= O(\sqrt{T})\,\max\Bigl\{\sqrt{\tfrac{\sigma}{\delta}},\,(\sigma\mu)^{1/4}\Bigr\},
\]
where Lemma 2.1 was used. From this, we have $L[v,\xi] = 0$, implying that $v$ satisfies $\frac{\partial v}{\partial n} = 0$ on $\Gamma \times (0,T)$.

Subcase (1ii). $\sqrt{\sigma\mu} \to \gamma \in (0,\infty)$ as $\delta \to 0$. In this case, $h \to \infty$. By the weak convergence of $u$, as $\delta \to 0$ it holds from (3.9) that
\[
\mathrm{I} = \sqrt{\sigma\mu}\int_\Gamma \Theta_R\,u \;\to\; \gamma\int_\Gamma J^\infty_D[\xi]\,v.
\]
Moreover, combining (3.19),
(3.20), (3.26) and (3.27), we have $|\mathrm{II} + \mathrm{III}| \to 0$ as $\delta \to 0$. It turns out that
\[
L[v,\xi] = \gamma\int_0^T\!\!\int_\Gamma v\,J^\infty_D[\xi],
\]
which means that $v$ satisfies $k\frac{\partial v}{\partial n} = \gamma J^\infty_D[v]$ on $\Gamma \times (0,T)$.

Subcase (1iii). $\sigma\mu \to \infty$. In this case, $h \to \infty$ as $\delta \to 0$. Dividing both sides of (3.21) by $\sqrt{\sigma\mu}$ and sending $\delta \to 0$, we obtain
\[
\int_0^T\!\!\int_\Gamma v\,J^\infty_D[\xi] = 0.
\]
Because the range of $J^\infty_D[\cdot]$ contains $\{e_n\}_{n=1}^\infty$ for almost every $t \in (0,T)$, it turns out that $\nabla_\Gamma v = 0$ on $\Gamma$. We further choose a special test function $\xi$ such that $\xi(s,0,t) = m(t)$ for some smooth function $m(t)$. Then we construct a linear extension by defining $\theta(s,r,t) = \bigl(1 - \tfrac{r}{\delta}\bigr)m(t)$. Consequently, a direct computation leads to
\[
\begin{aligned}
\mathrm{RHS} = -\int_0^T\!\!\int_{\Omega_2} \nabla\theta\cdot A\nabla u\,dx\,dt
&= \int_0^T \frac{\sigma m(t)}{\delta}\Bigl(\int_0^\delta\!\!\int_\Gamma u_r(1 + 2Hr + \kappa r^2)\Bigr) dt\\
&= \int_0^T \frac{\sigma m(t)}{\delta}\Bigl(\int_\Gamma u\Bigr) dt - \int_0^T \frac{\sigma m(t)}{\delta}\int_0^\delta\!\!\int_\Gamma u\,(2H + 2\kappa r)\\
&\le \frac{\sigma}{\delta}\int_0^T m(t)\bigl(O(1) + O(\sqrt{\delta})\,\|u(\cdot,t)\|_{L^2(\Omega_2)}\bigr)\,dt, \qquad (3.28)
\end{aligned}
\]
from which we derive $L[v,\xi] = 0$ as $\delta \to 0$. Then $v$ satisfies $\int_\Gamma \frac{\partial v}{\partial n} = 0$ on $\Gamma \times (0,T)$.

Case 2. $\sigma/\delta \to \alpha \in (0,\infty)$ as $\delta \to 0$.

Subcase (2i).
$\sigma\mu \to 0$ as $\delta \to 0$. In this case, $h \to 0$. From (3.18) and (3.24)–(3.27), we have $\mathrm{I} \to -\alpha\int_0^T\!\int_\Gamma v\xi$ and $\mathrm{II} + \mathrm{III} \to 0$ as $\delta \to 0$, from which it follows that
\[
L[v,\xi] = -\alpha\int_0^T\!\!\int_\Gamma v\xi. \qquad (3.29)
\]
So $v$ satisfies $k\frac{\partial v}{\partial n} = -\alpha v$ on $\Gamma \times (0,T)$.

Subcase (2ii). $\sqrt{\sigma\mu} \to \gamma \in (0,\infty)$ as $\delta \to 0$. In this case, $h \to H = \gamma/\alpha \in (0,\infty)$. By virtue of (3.18) and (3.24)–(3.27), it holds that $\mathrm{I} \to \gamma\int_0^T\!\int_\Gamma v\,J^{\gamma/\alpha}_D[\xi]$ and $\mathrm{II} + \mathrm{III} \to 0$ as $\delta \to 0$, from which we get
\[
L[v,\xi] = \gamma\int_0^T\!\!\int_\Gamma v\,J^{\gamma/\alpha}_D[\xi].
\]
So $v$ satisfies $k\frac{\partial v}{\partial n} = \gamma J^{\gamma/\alpha}_D[v]$ on $\Gamma \times (0,T)$.

Subcase (2iii). $\sigma\mu \to \infty$ as $\delta \to 0$. In this case, $h \to \infty$. Dividing both sides of (3.22) by $\sqrt{\sigma\mu}$ and sending $\delta \to 0$, we obtain $\int_0^T\!\int_\Gamma v\,J^\infty_D[\xi] = 0$, resulting in $\nabla_\Gamma v = 0$ on $\Gamma$.
Using the same test function and the same auxiliary function as in Subcase (1iii), we obtain $L[v,\xi] = -\alpha\int_0^T\!\int_\Gamma v\xi$ and $\nabla_\Gamma v = 0$ on $\Gamma$, which means $v$ satisfies $\int_\Gamma \bigl(k\frac{\partial v}{\partial n} + \alpha v\bigr) = 0$ on $\Gamma \times (0,T)$.

Case 3. $\sigma/\delta \to \infty$ as $\delta \to 0$.

Subcase (3i). $\sqrt{\sigma\mu} \to \gamma \in [0,\infty)$ as $\delta \to 0$. In this case, $h \to 0$. Dividing both sides of (3.22) by $\sigma/\delta$ and sending $\delta \to 0$, a combination of (3.8) and (3.24)–(3.27) leads to
\[
\frac{\delta}{\sigma}\,\mathrm{I} \to -\int_0^T\!\!\int_\Gamma v\xi = 0,
\]
from which $v$ satisfies $v = 0$ on $\Gamma \times (0,T)$.

Subcase (3ii). $\sigma\mu \to \infty$ as $\delta \to 0$. In this case, $h \to H \in [0,\infty]$. If $H = 0$, then dividing both sides of (3.22) by $\sigma/\delta$ and sending $\delta \to 0$ yields $v = 0$ on $\Gamma \times (0,T)$. If $H \in (0,\infty]$, then dividing both sides of (3.22) by $\sqrt{\sigma\mu}$ and sending $\delta \to 0$, we have
\[
\frac{\mathrm{I}(t)}{\sqrt{\sigma\mu}} \to \int_0^T\!\!\int_\Gamma v\,J^H_D[\xi] = 0.
\]
Employing the argument analogous to that in Subcase (1iii), for almost every $t \in (0,T)$ we have $\nabla_\Gamma v = 0$ and $\int_0^T\!\int_\Gamma v\,m(t) = 0$, which implies $v = 0$ on $\Gamma \times (0,T)$.
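Before turning to the Neumann problem, here is a minimal numerical sanity check of the simplest entry of this case analysis. It is not part of the proof: it replaces the curved layer by a flat one-dimensional slab, and the setup, names and discretization below are illustrative assumptions only. A body of conductivity $k$ on $(0,1)$ is coated by a layer of conductivity $\sigma$ and thickness $\delta$, with the value $1$ prescribed at the inner end and $0$ outside the coating; as $\delta \to 0$ with $\sigma/\delta \to \alpha$, the trace of the solution at the interface should approach the value $k/(k+\alpha)$ predicted by the effective Robin condition $k\,\partial v/\partial n = -\alpha v$ (cf. Subcase (2i)).

import numpy as np

def interface_trace(k, sigma, delta, n=1500):
    """Finite-difference solve of -(a(x) u')' = 0 on (0, 1 + delta) with
    u(0) = 1, u(1 + delta) = 0, where a = k on the body (0, 1) and
    a = sigma on the coating (1, 1 + delta); returns u at x = 1."""
    L = 1.0 + delta
    x = np.linspace(0.0, L, n + 1)
    h = x[1] - x[0]
    a = np.where(0.5 * (x[:-1] + x[1:]) < 1.0, k, sigma)  # conductivity on each cell
    # equations at the interior nodes x_1, ..., x_{n-1}
    main = (a[:-1] + a[1:]) / h**2
    off = -a[1:-1] / h**2
    A = np.diag(main) + np.diag(off, 1) + np.diag(off, -1)
    b = np.zeros(n - 1)
    b[0] = a[0] / h**2                      # contribution of the boundary value u(0) = 1
    u = np.linalg.solve(A, b)
    return u[np.argmin(np.abs(x[1:-1] - 1.0))]

k, alpha = 2.0, 5.0
print("effective Robin prediction:", k / (k + alpha))
for delta in (0.2, 0.05, 0.0125):
    sigma = alpha * delta * (1.0 + delta)   # so that sigma / delta -> alpha as delta -> 0
    print(f"delta = {delta:7.4f}   u(1) = {interface_trace(k, sigma, delta):.4f}")

In the same toy problem, taking $\sigma/\delta \to 0$ (for instance $\sigma = \delta^2$) drives the interface trace to $1$, i.e. the coating becomes perfectly insulating and the effective condition degenerates to the homogeneous Neumann condition of Subcase (1i).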
Step 2. Effective boundary conditions for the Neumann problem (1.2). Let $\xi \in C^\infty(\overline{\Omega}_1 \times [0,T])$ with $\xi = 0$ at $t = T$, and extend the test function $\xi$ to $\Omega \times [0,T]$ by defining
\[
\xi(x,t) =
\begin{cases}
\xi(x,t), & x \in \Omega_1,\\
\pi(p(x), r(x), t), & x \in \Omega_2,
\end{cases}
\]
where $\pi$ is introduced in (3.7). It is easy to see that $\xi \in W^{1,1}_{2,0}(Q_T)$. Thanks to the weak convergence of $\{u\}_{\delta>0}$, as $\delta \to 0$ it follows from Definition 3.1 that
\[
L[u,\xi] \to L[v,\xi] = -\lim_{\delta\to 0}\int_0^T\!\!\int_{\Omega_2} \nabla\pi\cdot A\nabla u\,dx\,dt. \qquad (3.30)
\]
In the following, we focus on the right-hand side of (3.30). Using the curvilinear coordinates $(s,r)$ in (3.3), it can be rewritten as
\[
\begin{aligned}
\mathrm{RHS} &:= -\int_0^T\!\!\int_\Gamma\!\int_0^\delta (\sigma\pi_r u_r + \mu\nabla_s\pi\cdot\nabla_s u)(1 + 2Hr + \kappa r^2)\\
&= -\int_0^T\!\!\int_\Gamma\!\int_0^\delta (\sigma\pi_r u_r + \mu\nabla_\Gamma\pi\cdot\nabla_\Gamma u)
 - \int_0^T\!\!\int_\Gamma\!\int_0^\delta (\sigma\pi_r u_r + \mu\nabla_\Gamma\pi\cdot\nabla_\Gamma u)(2Hr + \kappa r^2)\\
&\quad - \int_0^T\!\!\int_\Gamma\!\int_0^\delta \mu(\nabla_s\pi\cdot\nabla_s u - \nabla_\Gamma\pi\cdot\nabla_\Gamma u)(1 + 2Hr + \kappa r^2)
\;=:\; \mathrm{I} + \mathrm{II} + \mathrm{III}. \qquad (3.31)
\end{aligned}
\]
As noted, we write
\[
\mathrm{I} = -\int_0^\delta\!\!\int_\Gamma (\sigma\pi_r u_r + \mu\nabla_\Gamma\pi\cdot\nabla_\Gamma u) = \sqrt{\sigma\mu}\int_\Gamma u(p(s),t)\,\Pi_R(s,0). \qquad (3.32)
\]
Using the same estimates as in (3.26) and
(3.27), we get
\[
|\mathrm{II} + \mathrm{III}| \le O(\delta)\int_0^T \frac{1}{\sqrt{t}}\Bigl(\int_\Gamma \sigma|\pi_r(s,0,t)|\Bigr)^{1/2} dt
= O(\delta)\sqrt{T}\,(\sigma\mu)^{1/4}\,\|\Pi_R\|^{1/2}_{L^\infty(\Gamma)}. \qquad (3.33)
\]
Next, we consider the following cases: (1) $\sigma\mu \to 0$; (2) $\sqrt{\sigma\mu} \to \gamma \in (0,\infty)$; (3) $\sigma\mu \to \infty$.

Case 1. $\sigma\mu \to 0$ as $\delta \to 0$. By (3.8), (3.18) and (3.19), we have
\[
\mathrm{RHS} \le O(1)\int_0^T \Bigl(\int_0^\delta\!\!\int_\Gamma \sigma\pi_r^2 + \mu|\nabla_\Gamma\pi|^2\Bigr)^{1/2}\Bigl(\int_\Omega \nabla u\cdot A\nabla u\Bigr)^{1/2} dt
= O(1)\sqrt{T}\,(\sigma\mu)^{1/4},
\]
where the Hölder inequality and Lemma 2.1 were used. So we have $L[v,\xi] = 0$, implying $v$ satisfies $\frac{\partial v}{\partial n} = 0$ on $\Gamma \times (0,T)$.

Case 2. $\sqrt{\sigma\mu} \to \gamma \in (0,\infty)$ as $\delta \to 0$.

Subcase (2i). $\mu\delta \to 0$ as $\delta \to 0$. In this case, $h \to 0$. In terms of (3.18), (3.19), (3.32) and (3.33), we have $\mathrm{I} \to 0$ and $|\mathrm{II} + \mathrm{III}| \to 0$, from which we have $L[v,\xi] = 0$.
So $v$ satisfies $\frac{\partial v}{\partial n} = 0$ on $\Gamma \times (0,T)$.

Subcase (2ii). $\mu\delta \to \beta \in (0,\infty]$ as $\delta \to 0$. In this case, $h \to H = \beta/\gamma \in (0,\infty]$. As $\delta \to 0$, it follows from (3.19) and (3.33) that $\mathrm{I} \to \gamma\int_0^T\!\int_\Gamma v\,J^{\beta/\gamma}_N[\xi]$ and $|\mathrm{II} + \mathrm{III}| \to 0$, from which we get
\[
L[v,\xi] = \gamma\int_0^T\!\!\int_\Gamma v\,J^{\beta/\gamma}_N[\xi].
\]
So $v$ satisfies $k\frac{\partial v}{\partial n} = \gamma J^{\beta/\gamma}_N[v]$ on $\Gamma \times (0,T)$.

Case 3. $\sigma\mu \to \infty$ as $\delta \to 0$.

Subcase (3i). $\mu\delta \to \beta \in [0,\infty)$. In this case, $h \to 0$. By virtue of (3.18) and (3.32), it holds that
\[
\mathrm{I} = \mu\delta\int_0^T\!\!\int_\Gamma \bigl(\Delta_\Gamma\xi + O(h^2)\bigr)\,u \;\to\; \beta\int_0^T\!\!\int_\Gamma v\,\Delta_\Gamma\xi.
\]
Additionally, by (3.18) and (3.33), $|\mathrm{II} + \mathrm{III}| \to 0$ as $\delta \to 0$. Consequently, we get
\[
L[v,\xi] = \beta\int_0^T\!\!\int_\Gamma v\,\Delta_\Gamma\xi. \qquad (3.34)
\]
Our next task is to prove that $v$ is the weak solution of (3.1) with the boundary condition $k\frac{\partial v}{\partial n} = \beta\Delta_\Gamma v$ on $\Gamma \times (0,T)$. To this end, it remains to show $v \in L^2\bigl((0,T); H^1(\Gamma)\bigr)$. We start by asserting that $\tilde v$ is the unique weak solution of (3.1) that also satisfies (3.34); it suffices to prove $v = \tilde v$. Now consider $v - \tilde v$, which, without loss of generality, is also denoted by $v$. We then point out that $v$ is the weak solution of (3.1) with $u_0 = f = 0$. In particular, by Lemma 2.1, $v \in V^{1,0}_2(\Omega_1\times(0,T)) \cap W^{1,1}_2(\Omega_1\times(t_0,T))$. For any small $t_0 \in (0,T)$, fix $t_1 \in (t_0,T]$. As $\delta \to 0$, (3.22) is transformed into
\[
\int_{t_0}^{t_1}\!\!\int_{\Omega_1} (v_t\xi + k\nabla v\cdot\nabla\xi)\,dx\,dt = \beta\int_{t_0}^{t_1}\!\!\int_\Gamma v\,\Delta_\Gamma\xi\,ds\,dt. \qquad (3.35)
\]
Furthermore, take the test function $\xi = w(s,t)\,\eta(r)$ with the following assumptions: $\eta = \eta(r)$ is a cut-off function in the $r$ variable with $0 \le \eta \le 1$, satisfying $\eta \in C^\infty(-\infty,0]$, $\eta = 1$ for $-\epsilon \le r \le 0$ and $\eta = 0$ for $r \le -2\epsilon$; and $w(s,t) \in C^2(\Gamma\times[0,T])$.
From (3.35), we are led to
\[
\beta\left|\int_{t_0}^{t_1}\!\!\int_\Gamma v\,\Delta_\Gamma\xi\,ds\,dt\right|
= \left|\int_{t_0}^{t_1}\!\!\int_{\Omega_1} (v_t\xi + k\nabla v\cdot\nabla\xi)\,dx\,dt\right|
\le C\,\|v\|_{W^{1,1}_2(\Omega_1\times(t_0,t_1))}\,\|w\|_{L^2((t_0,t_1);H^1(\Gamma))}. \qquad (3.36)
\]
Consider such $w$ with $\int_{t_0}^{t_1}\!\int_\Gamma w\,ds\,dt = 0$. We then define a linear functional
\[
w \mapsto \int_{t_0}^{t_1}\!\!\int_\Gamma v\,\Delta_\Gamma w\,ds\,dt,
\]
which is well defined by (3.36). This functional can be extended to the Hilbert space
\[
\mathcal{H} = \Bigl\{w \in L^2\bigl((t_0,t_1); H^1(\Gamma)\bigr) : \int_{t_0}^{t_1}\!\!\int_\Gamma w\,ds\,dt = 0\Bigr\}
\]
with the inner product $\langle w_1, w_2\rangle := -\int_{t_0}^{t_1}\!\int_\Gamma \nabla_\Gamma w_1\cdot\nabla_\Gamma w_2$. From the Riesz representation theorem, there is some $z \in \mathcal{H}$ satisfying
\[
-\int_{t_0}^{t_1}\!\!\int_\Gamma \nabla_\Gamma z\cdot\nabla_\Gamma w\,ds\,dt
= \int_{t_0}^{t_1}\!\!\int_\Gamma v\,\Delta_\Gamma w\,ds\,dt
= \int_{t_0}^{t_1}\!\!\int_\Gamma z\,\Delta_\Gamma w\,ds\,dt. \qquad (3.37)
\]
Consequently, it follows from (3.37) that $\int_{t_0}^{t_1}\!\int_\Gamma (v - z)\Delta_\Gamma w = 0$. By the Riesz theorem again, this means that $v - z = m(t)$ for some function $m(t) \in \mathcal{H}$, and thus $v \in L^2\bigl((0,T); H^1(\Gamma)\bigr)$. Going back to (3.35), from Lemma 2.1 we have
\[
\int_{\Omega_1} v^2(x,t_1)\,dx \le \int_{\Omega_1} v^2(x,t_0)\,dx,
\]
from which we are done by sending $t_0 \to 0$ for Subcase (3i).
Subcase (3ii). $\mu\delta \to \infty$ as $\delta \to 0$. In this case, $h \to H \in [0,\infty]$ after passing to a subsequence. If $H = 0$, then dividing both sides of (3.28) by $\mu\delta$ and sending $\delta \to 0$, we obtain $\int_0^T\!\int_\Gamma v\,\Delta_\Gamma\xi = 0$, implying that $v(\cdot) = m(t)$ on $\Gamma$ for almost every $t \in (0,T)$. If $H \in (0,\infty]$, then dividing both sides of (3.28) by $\sqrt{\sigma\mu}$ and sending $\delta \to 0$, we obtain $\int_0^T\!\int_\Gamma v\,J^H_N[\xi] = 0$, again implying that $v(\cdot) = m(t)$ on $\Gamma$ for almost every $t \in (0,T)$. We further take a special test function $\xi = \xi(t)$ on $\Gamma$ and a constant extension in $\Omega_2$ such that $\xi = \xi(t)$, resulting in $L[v,\xi] = 0$. So $v$ satisfies $\int_\Gamma \frac{\partial v}{\partial n} = 0$ on $\Gamma \times (0,T)$. Therefore, we accomplish the whole proof.

We conclude this section by asking a natural question: what is the effective boundary condition if the two eigenvalues of the coating in the tangent directions are not identical? That is to say, $A(x)$ has two different eigenvalues in the tangent directions. We answer this question by considering the Type II condition (1.5) in the next section.

4 EBCs for Type II condition

In this section, we always assume that $\Gamma$ is a topological torus and $A(x)$ satisfies the Type II condition (1.5). The aim of this section is to derive EBCs on $\Gamma \times (0,T)$ as the thickness of the layer decreases to zero.
With the aid of the curvilinear coordinates $(s,r)$, we choose a global parametrization $p(s_1,s_2)$ of $\Gamma$ such that
\[
\boldsymbol{\tau}_1 = \frac{p_{s_1}}{|p_{s_1}|}, \qquad \boldsymbol{\tau}_2 = \frac{p_{s_2}}{|p_{s_2}|}.
\]
More precisely, let $\Gamma := \Gamma_1 \times \Gamma_2$ satisfy $\Gamma_1 = \{p(s_1,0) \mid s_1 \in [0,l_1)\}$ and $\Gamma_2 = \{p(0,s_2) \mid s_2 \in [0,l_2)\}$, where $p(s)$ is $l_1$-periodic in $s_1$ and $l_2$-periodic in $s_2$. In $\Omega_2$, the explicit formula of $A(x)$ can be expressed as
\[
A(x) = \sigma\,\mathbf{n}(p)\otimes\mathbf{n}(p) + \mu_1\,\boldsymbol{\tau}_1(p)\otimes\boldsymbol{\tau}_1(p) + \mu_2\,\boldsymbol{\tau}_2(p)\otimes\boldsymbol{\tau}_2(p).
\]
Theorem 4.1. Suppose that $\Gamma$ is a topological torus and $A(x)$ is given as in (1.1) or (1.2) and satisfies (1.5). Let $u_0 \in L^2(\Omega)$ and $f \in L^2(Q_T)$, with both functions independent of $\delta$. Assume further, without loss of generality, that $\mu_1 > \mu_2$. Moreover, $\sigma$, $\mu_1$ and $\mu_2$ satisfy the scaling relationships
\[
\lim_{\delta\to 0}\frac{\mu_2}{\mu_1} = c \in [0,1], \qquad
\lim_{\delta\to 0}\frac{\sigma}{\delta} = \alpha \in [0,\infty], \qquad
\lim_{\delta\to 0}\sqrt{\sigma\mu_i} = \gamma_i \in [0,\infty], \qquad
\lim_{\delta\to 0}\mu_i\delta = \beta_i \in [0,\infty], \qquad i = 1,2.
\]
(i) If $c \in (0,1]$, then as $\delta \to 0$, $u \to v$ weakly in $W^{1,0}_2(\Omega_1\times(0,T))$ and strongly in $C([0,T]; L^2(\Omega_1))$, where $v$ is the weak solution of (3.1) subject to the effective boundary conditions listed in Table 2.
(ii) If $c = 0$ and $\lim_{\delta\to 0}\delta^2\mu_1/\mu_2 = 0$, then $u \to v$ weakly in $W^{1,0}_2(\Omega_1\times(0,T))$ and strongly in $C([0,T]; L^2(\Omega_1))$, where $v$ is the weak solution of (3.1)
subject to the effective boundary conditions listed in Table 3.

Table 2: Effective boundary conditions on Γ × (0, T) for c ∈ (0, 1].

EBCs on Γ × (0, T) for (1.1), as δ → 0:
                          | σ/δ → 0                        | σ/δ → α ∈ (0, ∞)                      | σ/δ → ∞
  σµ1 → 0                 | ∂v/∂n = 0                      | k ∂v/∂n = −αv                         | v = 0
  √(σµ1) → γ1 ∈ (0, ∞)    | k ∂v/∂n = γ1 K^∞_D[v]          | k ∂v/∂n = γ1 K^{γ1/α}_D[v]            | v = 0
  σµ1 → ∞                 | ∇_Γ v = 0, ∫_Γ ∂v/∂n = 0       | ∇_Γ v = 0, ∫_Γ (k ∂v/∂n + αv) = 0     | v = 0

EBCs on Γ × (0, T) for (1.2), as δ → 0:
                          | µ1δ → 0       | µ1δ → β1 ∈ (0, ∞)                       | µ1δ → ∞
  σµ1 → 0                 | ∂v/∂n = 0     | ∂v/∂n = 0                               | ∂v/∂n = 0
  √(σµ1) → γ1 ∈ (0, ∞)    | ∂v/∂n = 0     | k ∂v/∂n = γ1 K^{β1/γ1}_N[v]             | k ∂v/∂n = γ1 K^∞_N[v]
  σµ1 → ∞                 | ∂v/∂n = 0     | k ∂v/∂n = β1 (∂²v/∂τ1² + c ∂²v/∂τ2²)    | ∇_Γ v = 0, ∫_Γ ∂v/∂n = 0

Table 3: Effective boundary conditions on Γ × (0, T) for c = 0.

EBCs on Γ × (0, T) for (1.1), as δ → 0:
                               | σ/δ → 0                                        | σ/δ → α ∈ (0, ∞)                                    | σ/δ → ∞
  σµ1 → 0                      | ∂v/∂n = 0                                      | k ∂v/∂n = −αv                                       | v = 0
  √(σµ1) → γ1 ∈ (0, ∞)         | k ∂v/∂n = γ1 Λ^∞_D[v]                          | k ∂v/∂n = γ1 Λ^{γ1/α}_D[v]                          | v = 0
  σµ1 → ∞, σµ2 → 0             | ∂v/∂τ1 = 0, ∫_{Γ1} ∂v/∂n = 0                   | ∂v/∂τ1 = 0, ∫_{Γ1} (∂v/∂n + αv) = 0                 | v = 0
  σµ1 → ∞, √(σµ2) → γ2 ∈ (0,∞) | ∂v/∂τ1 = 0, ∫_{Γ1} (k ∂v/∂n − γ2 D^∞_D[v]) = 0 | ∂v/∂τ1 = 0, ∫_{Γ1} (k ∂v/∂n − γ2 D^{γ2/α}_D[v]) = 0 | v = 0
  σµ1 → ∞, σµ2 → ∞             | ∇_Γ v = 0, ∫_Γ ∂v/∂n = 0                       | ∇_Γ v = 0, ∫_Γ ∂v/∂n = 0                            | v = 0

EBCs on Γ × (0, T) for (1.2), as δ → 0:
                          | µ1δ → 0       | µ1δ → β1 ∈ (0, ∞)              | µ1δ → ∞
  σµ1 → 0                 | ∂v/∂n = 0     | ∂v/∂n = 0                      | ∂v/∂n = 0
  √(σµ1) → γ1 ∈ (0, ∞)    | ∂v/∂n = 0     | k ∂v/∂n = γ1 Λ^{β1/γ1}_N[v]    | k ∂v/∂n = γ1 Λ^∞_N[v]
  σµ1 → ∞                 | ∂v/∂n = 0     | k ∂v/∂n = β1 ∂²v/∂τ1²          | see the next table

As µ1δ → ∞ and σµ1 → ∞:
                          | µ2δ → 0                        | µ2δ → β2 ∈ (0, ∞)                                    | µ2δ → ∞
  σµ2 → 0                 | ∂v/∂τ1 = 0, ∫_{Γ1} ∂v/∂n = 0   | ∂v/∂τ1 = 0, ∫_{Γ1} ∂v/∂n = 0                         | ∂v/∂τ1 = 0, ∫_{Γ1} ∂v/∂n = 0
  √(σµ2) → γ2 ∈ (0, ∞)    | ∂v/∂τ1 = 0, ∫_{Γ1} ∂v/∂n = 0   | ∂v/∂τ1 = 0, ∫_{Γ1} (k ∂v/∂n − γ2 D^{β2/γ2}_N[v]) = 0 | ∂v/∂τ1 = 0, ∫_{Γ1} (k ∂v/∂n − γ2 D^∞_N[v]) = 0
  σµ2 → ∞                 | ∂v/∂τ1 = 0, ∫_{Γ1} ∂v/∂n = 0   | ∂v/∂τ1 = 0, ∫_{Γ1} (k ∂v/∂n − β2 ∂²v/∂τ2²) = 0       | ∇_Γ v = 0, ∫_Γ ∂v/∂n = 0

The boundary condition ∂v/∂τ1 = 0 on Γ × (0, T) means that v is constant in s1 on Γ, although it may still depend on s2 and t. The boundary condition k ∂v/∂n = β1(∂²v/∂τ1² + c ∂²v/∂τ2²) can be viewed as a second-order partial differential equation on Γ.
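To make the last remark concrete, here is a hedged coordinate form of that condition (my own rewriting, under the assumption that $s_1$ and $s_2$ are arclength parameters along $\boldsymbol{\tau}_1$ and $\boldsymbol{\tau}_2$, so that $\partial/\partial\boldsymbol{\tau}_i = \partial/\partial s_i$ on $\Gamma$):
\[
k\,\frac{\partial v}{\partial n} = \beta_1\Bigl(\frac{\partial^2 v}{\partial s_1^2} + c\,\frac{\partial^2 v}{\partial s_2^2}\Bigr) \quad \text{on } \Gamma\times(0,T).
\]
In words, the flux from the bulk is balanced by a second-order operator on the trace of $v$ whose anisotropy ratio $c$ records the limit of $\mu_2/\mu_1$ from Theorem 4.1; when the two tangential directions are weighted equally ($c = 1$), it formally matches the Laplace–Beltrami-type condition $k\,\partial v/\partial n = \beta\,\Delta_\Gamma v$ obtained in Subcase (3i) of Step 2 above.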
For $H \in (0,\infty]$, with a smooth $g(s)$ being $l_1$-periodic in $s_1$ and $l_2$-periodic in $s_2$, $K^H_D$ and $K^H_N$ in Table 2 are defined by
\[
\bigl(K^H_D[g],\,K^H_N[g]\bigr)(s) := \bigl(\Psi_R(s,0),\,\Phi_R(s,0)\bigr),
\]
where $\Psi$ and $\Phi$ are, respectively, the bounded solutions of
\[
\begin{cases}
\Psi_{RR} + \Psi_{s_1s_1} + c\,\Psi_{s_2s_2} = 0, & \mathbb{R}^2\times(0,H),\\
\Psi(s,0) = g(s), \quad \Psi(s,H) = 0,
\end{cases}
\qquad
\begin{cases}
\Phi_{RR} + \Phi_{s_1s_1} + c\,\Phi_{s_2s_2} = 0, & \mathbb{R}^2\times(0,H),\\
\Phi(s,0) = g(s), \quad \Phi_R(s,H) = 0.
\end{cases}
\]
$\Lambda^H_D$ and $\Lambda^H_N$ in Table 3 are defined by
\[
\bigl(\Lambda^H_D[g],\,\Lambda^H_N[g]\bigr)(s) := \bigl(\Psi^0_R(s,0),\,\Phi^0_R(s,0)\bigr),
\]
where $\Psi^0$ and $\Phi^0$ are the bounded solutions of
\[
\begin{cases}
\Psi^0_{RR} + \Psi^0_{s_1s_1} = 0, & \mathbb{R}^2\times(0,H),\\
\Psi^0(s,0) = g(s), \quad \Psi^0(s,H) = 0,
\end{cases}
\qquad
\begin{cases}
\Phi^0_{RR} + \Phi^0_{s_1s_1} = 0, & \mathbb{R}^2\times(0,H),\\
\Phi^0(s,0) = g(s), \quad \Phi^0_R(s,H) = 0.
\end{cases}
\]
Finally, $D^H_D$ and $D^H_N$ are defined by
\[
\bigl(D^H_D[g],\,D^H_N[g]\bigr)(s_2) := \bigl(\Psi_R(s_2,0),\,\Phi_R(s_2,0)\bigr),
\]
where $\Psi(s_2,R)$ and $\Phi(s_2,R)$ are the bounded solutions of
\[
\begin{cases}
\Psi_{RR} + \Psi_{s_2s_2} = 0, & \mathbb{R}\times(0,H),\\
\Psi(s_2,0) = g(s_2), \quad \Psi(s_2,H) = 0,
\end{cases}
\qquad
\begin{cases}
\Phi_{RR} + \Phi_{s_2s_2} = 0, & \mathbb{R}\times(0,H),\\
\Phi(s_2,0) = g(s_2), \quad \Phi_R(s_2,H) = 0.
\end{cases}
\]

4.1 Definition, existence and uniqueness of weak solutions of effective models

We define weak solutions of (3.1) together with the new boundary conditions from Tables 2 and 3.

Definition 4.1. Let the test function $\xi \in C^\infty(\overline{Q^1_T})$ satisfy $\xi = 0$ at $t = T$.

(1) A function $v$ is said to be a weak solution of (3.1) with the boundary conditions $\frac{\partial v}{\partial\boldsymbol{\tau}_1} = 0$ and $\int_{\Gamma_1}\bigl(k\frac{\partial v}{\partial n} - B[v]\bigr) = 0$, where $B[v] = -\alpha v$, $\gamma_2 D^H_D[v]$ or $\gamma_2 D^H_N[v]$ for $H \in (0,\infty]$, if $v \in V^{1,0}_2(Q^1_T)$, for almost every fixed $t \in (0,T)$ the trace of $v$ on $\Gamma$ is constant in $s_1$, and for any test function $\xi$ satisfying $\frac{\partial\xi}{\partial\boldsymbol{\tau}_1} = 0$ on $\Gamma$, $v$ satisfies
\[
L[v,\xi] = \int_0^T\!\!\int_\Gamma v\,B[\xi]\,ds\,dt.
\]

(2) A function $v$ is said to be a weak solution of (3.1)
with the boundary condition $k\frac{\partial v}{\partial n} = B[v]$, where $B[v] = \gamma_1 K^H_D[v]$ (resp. $\gamma_1 K^H_N[v]$), or $\gamma_1\Lambda^H_D[v]$ (resp. $\gamma_1\Lambda^H_N[v]$), for $H \in (0,\infty]$, if $v \in V^{1,0}_2(Q^1_T)$ and for any test function $\xi$, $v$ satisfies
\[
L[v,\xi] = \int_0^T\!\!\int_\Gamma v\,B[\xi]\,ds\,dt.
\]

(3) A function $v$ is a weak solution of (3.1) with the boundary condition $k\frac{\partial v}{\partial n} = \beta_1\bigl(\frac{\partial^2 v}{\partial\boldsymbol{\tau}_1^2} + c\frac{\partial^2 v}{\partial\boldsymbol{\tau}_2^2}\bigr)$ for $c \in [0,1]$, if $v \in V^{1,0}_2(Q^1_T)$ with its trace belonging to $L^2\bigl((0,T); H^1(\Gamma)\bigr)$, and for any test function $\xi$, $v$ satisfies
\[
L[v,\xi] = -\beta_1\int_0^T\!\!\int_\Gamma \Bigl(\frac{\partial v}{\partial\boldsymbol{\tau}_1}\frac{\partial\xi}{\partial\boldsymbol{\tau}_1} + c\,\frac{\partial v}{\partial\boldsymbol{\tau}_2}\frac{\partial\xi}{\partial\boldsymbol{\tau}_2}\Bigr)\,ds\,dt.
\]

(4) A function $v$ is said to be a weak solution of (3.1) with the boundary conditions $\frac{\partial v}{\partial\boldsymbol{\tau}_1} = 0$ and $\int_{\Gamma_1}\bigl(k\frac{\partial v}{\partial n} - \beta_2\frac{\partial^2 v}{\partial\boldsymbol{\tau}_2^2}\bigr) = 0$, if $v \in V^{1,0}_2(Q^1_T)$ with its trace belonging to $L^2\bigl((0,T); H^1(\Gamma)\bigr)$ and being constant in $s_1$, and for any test function $\xi$ satisfying $\frac{\partial\xi}{\partial\boldsymbol{\tau}_1} = 0$ on $\Gamma$, $v$ satisfies
\[
L[v,\xi] = -\beta_2\int_0^T\!\!\int_\Gamma \frac{\partial v}{\partial\boldsymbol{\tau}_2}\frac{\partial\xi}{\partial\boldsymbol{\tau}_2}\,ds\,dt.
\]

Theorem 3.2 also applies to the existence and uniqueness of weak solutions of (3.1) together with the above boundary conditions.

4.2 Auxiliary functions

We are now in a position to construct two auxiliary functions for the Type II condition (1.5). For every $t \in [0,T]$, let $\psi(s,r,t)$ and $\varphi(s,r,t)$ be the bounded solutions of
\[
\begin{cases}
\sigma\psi_{rr} + \mu_1\psi_{s_1s_1} + \mu_2\psi_{s_2s_2} = 0, & \mathbb{R}^2\times(0,\delta),\\
\psi(s,0,t) = g(s), \quad \psi(s,\delta,t) = 0,
\end{cases} \qquad (4.1)
\]
\[
\begin{cases}
\sigma\varphi_{rr} + \mu_1\varphi_{s_1s_1} + \mu_2\varphi_{s_2s_2} = 0, & \mathbb{R}^2\times(0,\delta),\\
\varphi(s,0,t) = g(s), \quad \varphi_r(s,\delta,t) = 0,
\end{cases} \qquad (4.2)
\]
Let $r = R\sqrt{\sigma/\mu_1}$ and suppress the time dependence. Then we define
\[
\Psi^\delta(s,R) := \psi\big(s, R\sqrt{\sigma/\mu_1}, t\big), \qquad \Phi^\delta(s,R) := \phi\big(s, R\sqrt{\sigma/\mu_1}, t\big).
\]
Plugging $r$ into (4.1) and (4.2) leads to
\[
\Psi^\delta_{RR} + \Psi^\delta_{s_1s_1} + \tfrac{\mu_2}{\mu_1}\Psi^\delta_{s_2s_2} = 0 \ \text{in } \mathbb{R}^2\times(0,h_1), \qquad \Psi^\delta(s,0) = g(s), \quad \Psi^\delta(s,h_1) = 0, \tag{4.3}
\]
\[
\Phi^\delta_{RR} + \Phi^\delta_{s_1s_1} + \tfrac{\mu_2}{\mu_1}\Phi^\delta_{s_2s_2} = 0 \ \text{in } \mathbb{R}^2\times(0,h_1), \qquad \Phi^\delta(s,0) = g(s), \quad \Phi^\delta_R(s,h_1) = 0, \tag{4.4}
\]
where $h_1 = \delta\sqrt{\mu_1/\sigma}$.

We next estimate the size of $\Psi^\delta_R(s,0)$ and $\Phi^\delta_R(s,0)$ when the thickness of the thin layer is sufficiently small. In the case that $h_1 \to 0$ as $\delta \to 0$, the maximum principle gives
\[
\Big|\Psi^\delta_R(s,0) + \frac{g(s)}{h_1}\Big| = \Big|\frac{1}{h_1}\int_0^{h_1}\big(\Psi^\delta_R(s,0) - \Psi^\delta_R(s,R)\big)\,dR\Big| \le h_1\,\|\Psi^\delta_{RR}\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} \le h_1\,\|g\|_{C^2(\mathbb{R}^2)},
\]
from which it follows that
\[
\Psi^\delta_R(s,0) = \frac{1}{h_1}\big(-g(s) + O(h_1^2)\big).
\]
For the case that $h_1 \in (0,\infty]$ as $\delta \to 0$, a Taylor expansion of $\Psi^\delta$ yields
\[
\|\Psi^\delta_R(s,0)\|_{L^\infty(\mathbb{R}^2)} \le \frac{2}{R}\|\Psi^\delta\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} + R\,\|\Psi^\delta_{RR}\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} \le \frac{3\|g\|_{C^2(\mathbb{R}^2)}}{R}.
\]
As for (4.4), the maximum principle also applies to $\Phi^\delta_R(s,0)$. For the case $h_1 \to 0$ as $\delta \to 0$, we get
\[
\|\Phi^\delta_{RRRR}\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} = \|\widetilde\Delta^\delta_\Gamma(\widetilde\Delta^\delta_\Gamma\Phi^\delta)\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} = \|\widetilde\Delta^\delta_\Gamma(\widetilde\Delta^\delta_\Gamma g)\|_{L^\infty(\mathbb{R}^2)},
\]
where $\widetilde\Delta^\delta_\Gamma\Phi^\delta := \Phi^\delta_{s_1s_1} + \frac{\mu_2}{\mu_1}\Phi^\delta_{s_2s_2}$. Since $\Phi^\delta_{RRR}(s,h_1) = -\widetilde\Delta^\delta_\Gamma\Phi^\delta_R(s,h_1) = 0$ for all $s \in \mathbb{R}^2$, we derive
\[
\|\Phi^\delta_{RRR}\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} \le h_1\,\|\Phi^\delta_{RRRR}\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} \le h_1\,\|\widetilde\Delta^\delta_\Gamma(\widetilde\Delta^\delta_\Gamma g)\|_{L^\infty(\mathbb{R}^2)}.
\]
Combining this with the boundary condition $\Phi^\delta_{RR}(s,0) = -\widetilde\Delta^\delta_\Gamma\Phi^\delta(s,0) = -\widetilde\Delta^\delta_\Gamma g(s)$, we arrive at
\[
\Big|\Phi^\delta_R(s,0) - h_1\widetilde\Delta^\delta_\Gamma g(s)\Big| = \Big|\int_0^{h_1}\big(\widetilde\Delta^\delta_\Gamma g(s) + \Phi^\delta_{RR}(s,R)\big)\,dR\Big| = O(h_1^3),
\]
which results from $\|\Phi^\delta_{RR} + \widetilde\Delta^\delta_\Gamma g\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} \le h_1\|\Phi^\delta_{RRR}\|_{L^\infty(\mathbb{R}^2\times(0,h_1))} = O(h_1^2)$. For the case that $h_1 \in (0,\infty]$ as $\delta \to 0$, we have $\|\Phi^\delta_R(s,0)\|_{L^\infty(\mathbb{R}^2)} = O(1)$. We summarize these estimates as
\[
\sqrt{\sigma\mu_1}\,\|\Psi^\delta_R(s,0)\|_{L^\infty(\mathbb{R}^2)} =
\begin{cases}
\dfrac{\sigma}{\delta}\big(-g(s) + O(h_1^2)\big), & \text{if } h_1 \to 0 \text{ as } \delta \to 0,\\[2mm]
O(\sqrt{\sigma\mu_1}), & \text{if } h_1 \in (0,\infty] \text{ as } \delta \to 0;
\end{cases} \tag{4.5}
\]
\[
\sqrt{\sigma\mu_1}\,\|\Phi^\delta_R(s,0)\|_{L^\infty(\mathbb{R}^2)} =
\begin{cases}
\mu_1\delta\big(\widetilde\Delta^\delta_\Gamma g(s) + O(h_1^2)\big), & \text{if } h_1 \to 0 \text{ as } \delta \to 0,\\[2mm]
O(\sqrt{\sigma\mu_1}), & \text{if } h_1 \in (0,\infty] \text{ as } \delta \to 0.
\end{cases} \tag{4.6}
\]
Before going further, we consider the limiting equations as $\delta \to 0$. In the case of $c \in (0,1]$, if $h_1 \to H \in (0,\infty]$ as $\delta \to 0$, then (4.3) and (4.4) give
\[
\Psi_{RR} + \Psi_{s_1s_1} + c\,\Psi_{s_2s_2} = 0 \ \text{in } \mathbb{R}^2\times(0,H), \qquad \Psi(s,0) = g(s), \quad \Psi(s,H) = 0;
\]
\[
\Phi_{RR} + \Phi_{s_1s_1} + c\,\Phi_{s_2s_2} = 0 \ \text{in } \mathbb{R}^2\times(0,H), \qquad \Phi(s,0) = g(s), \quad \Phi_R(s,H) = 0. \tag{4.7}
\]
It is easy to see that each of these problems has a unique bounded solution. We define
\[
\big(K^H_D[g],\,K^H_N[g]\big)(s) := \big(\Psi_R(s,0),\,\Phi_R(s,0)\big).
\]
Furthermore, for $H \in (0,\infty)$, their analytic formulas are given by
\[
K^H_D[g](s) = -\sum_{n=1}^\infty \sqrt{\widetilde\lambda_n}\,\widetilde e_n(s)\,\widetilde g_n\Big(-1 + \frac{1}{\tanh(\sqrt{\widetilde\lambda_n}\,H)}\Big) - \sum_{n=1}^\infty \sqrt{\widetilde\lambda_n}\,\widetilde e_n(s)\,\widetilde g_n, \tag{4.8}
\]
\[
K^H_N[g](s) = -\sum_{n=1}^\infty \sqrt{\widetilde\lambda_n}\,\widetilde e_n(s)\,\widetilde g_n\Big(-1 + \tanh(\sqrt{\widetilde\lambda_n}\,H)\Big) - \sum_{n=1}^\infty \sqrt{\widetilde\lambda_n}\,\widetilde e_n(s)\,\widetilde g_n, \tag{4.9}
\]
where $\widetilde\lambda_n$ and $\widetilde e_n(s)$ are the eigenvalues and corresponding eigenfunctions of $-\widetilde\Delta_\Gamma$, with $\widetilde\Delta_\Gamma := \frac{\partial^2}{\partial s_1^2} + c\,\frac{\partial^2}{\partial s_2^2}$ defined on $\Gamma$, and $\widetilde g_n = \langle \widetilde e_n, g\rangle := \int_\Gamma \widetilde e_n\,g\,ds$. Moreover,
\[
K^\infty_D[g](s) = K^\infty_N[g](s) = -(-\widetilde\Delta_\Gamma)^{1/2} g(s).
\]
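As a quick consistency check (a standard one-mode computation, sketched here for the reader's convenience and not part of the original argument), formulas (4.8)-(4.9) can be read off one nonconstant Fourier mode at a time. Writing $\Psi(s,R) = a_n(R)\,\widetilde e_n(s)$, problem (4.7) reduces to the ODE
\[
a_n'' - \widetilde\lambda_n a_n = 0, \qquad a_n(0) = \widetilde g_n, \qquad a_n(H) = 0 \ \ (\text{resp. } a_n'(H) = 0),
\]
whose solutions for $H \in (0,\infty)$ are $a_n(R) = \widetilde g_n\,\dfrac{\sinh(\sqrt{\widetilde\lambda_n}(H-R))}{\sinh(\sqrt{\widetilde\lambda_n}H)}$ and $a_n(R) = \widetilde g_n\,\dfrac{\cosh(\sqrt{\widetilde\lambda_n}(H-R))}{\cosh(\sqrt{\widetilde\lambda_n}H)}$, so that
\[
a_n'(0) = -\sqrt{\widetilde\lambda_n}\,\widetilde g_n\coth(\sqrt{\widetilde\lambda_n}H) \quad\text{and}\quad a_n'(0) = -\sqrt{\widetilde\lambda_n}\,\widetilde g_n\tanh(\sqrt{\widetilde\lambda_n}H),
\]
which are exactly the summands in (4.8) and (4.9) after writing $\coth x = 1 + (\coth x - 1)$ and $\tanh x = 1 + (\tanh x - 1)$. Letting $H \to \infty$, both factors tend to $1$, recovering $K^\infty_D = K^\infty_N = -(-\widetilde\Delta_\Gamma)^{1/2}$.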
On the other hand, in the case of $c = 0$, if $h_1 \to H \in (0,\infty]$ as $\delta \to 0$, it is easy to see that the limits of (4.3) and (4.4) degenerate into
\[
\Psi^0_{RR} + \Psi^0_{s_1s_1} = 0 \ \text{in } \mathbb{R}^2\times(0,H), \qquad \Psi^0(s,0) = g(s), \quad \Psi^0(s,H) = 0,
\]
\[
\Phi^0_{RR} + \Phi^0_{s_1s_1} = 0 \ \text{in } \mathbb{R}^2\times(0,H), \qquad \Phi^0(s,0) = g(s), \quad \Phi^0_R(s,H) = 0. \tag{4.10}
\]
Moreover, we define $\Lambda^H_D[g](s) := \Psi^0_R(s,0)$ and $\Lambda^H_N[g](s) := \Phi^0_R(s,0)$. The analytic formulas for $\Lambda^H_D[g](s)$ and $\Lambda^H_N[g](s)$ are obtained by separation of variables. It is straightforward to see that
\[
\Lambda^H_D[g](s) = -S(s_2)\sum_{n=1}^\infty \frac{4\pi n}{l_1^2}\coth\Big(\frac{2\pi n H}{l_1}\Big)\int_0^{l_1} g(z)\cos\Big(\frac{2\pi n(z-s_1)}{l_1}\Big)\,dz, \tag{4.11}
\]
\[
\Lambda^H_N[g](s) = -S(s_2)\sum_{n=1}^\infty \frac{4\pi n}{l_1^2}\tanh\Big(\frac{2\pi n H}{l_1}\Big)\int_0^{l_1} g(z)\cos\Big(\frac{2\pi n(z-s_1)}{l_1}\Big)\,dz, \tag{4.12}
\]
where $S(s_2)$ is a periodic function in $s_2 \in [0,l_2)$.

We now discuss the existence and uniqueness of the solution of (4.10), which is of interest in its own right; we refer to the book [17] and the references therein. From the maximum principle, $\Psi^\delta$ and $\Phi^\delta$ are uniformly bounded and equicontinuous on compact subsets of $\mathbb{R}^2\times(0,H)$. Consequently, the Arzelà–Ascoli theorem ensures that
\[
\Psi^\delta \to \Psi^0, \qquad \Phi^\delta \to \Phi^0
\]
locally uniformly in $\mathbb{R}^2\times(0,H)$ after passing to a subsequence of $\delta \to 0$; moreover, the limiting functions satisfy $\Psi^0 \in C\big(\mathbb{R}^2\times(0,H)\big)$ and $\Phi^0 \in C\big(\mathbb{R}^2\times(0,H)\big)$.

Our next task is to establish the uniqueness of $\Psi^0$ and $\Phi^0$. To this end, let $\Psi^0_1$ and $\Psi^0_2$ be two solutions of the former equation in (4.10), and let $\Phi^0_1$ and $\Phi^0_2$ be two solutions of the latter. Without loss of generality, consider $\Psi^0 = \Psi^0_1 - \Psi^0_2$ and $\Phi^0 = \Phi^0_1 - \Phi^0_2$, which satisfy
\[
\Psi^0_{RR} + \Psi^0_{s_1s_1} = 0 \ \text{in } \mathbb{R}^2\times(0,H), \qquad \Psi^0(s,0) = 0, \quad \Psi^0(s,H) = 0;
\]
\[
\Phi^0_{RR} + \Phi^0_{s_1s_1} = 0 \ \text{in } \mathbb{R}^2\times(0,H), \qquad \Phi^0(s,0) = 0, \quad \Phi^0_R(s,H) = 0. \tag{4.13}
\]
Suppressing the $s_2$ variable and letting $W(s_1,R) := \Psi^0(s_1,s_2,R)$ and $V(s_1,R) := \Phi^0(s_1,s_2,R)$, we have
\[
W_{RR} + W_{s_1s_1} = 0 \ \text{in } \mathbb{R}\times(0,H), \qquad W(s,0) = 0, \quad W(s,H) = 0,
\]
\[
V_{RR} + V_{s_1s_1} = 0 \ \text{in } \mathbb{R}\times(0,H), \qquad V(s,0) = 0, \quad V_R(s,H) = 0.
\]
From the maximum principle, it follows that $W = V = 0$. Thus the uniqueness of $\Psi^0$ and $\Phi^0$ is established.

4.3 Proof of Theorem 4.1

The goal of this subsection is to prove Theorem 4.1 and to address the effective boundary conditions (EBCs) on $\Gamma\times(0,T)$.
Proof of Theorem 4.1. By Theorem 2.2, given any subsequence of $\delta$, we can ensure that $u \to v$ weakly in $W^{1,0}_2(\Omega_1\times(0,T))$ and strongly in $C\big([0,T]; L^2(\Omega_1)\big)$ after passing to a further subsequence. We will show that $v$ is a weak solution of (3.1) with the effective boundary conditions listed in Tables 2 and 3. Because of the uniqueness proved in Theorem 3.2, $u \to v$ without passing to any subsequence of $\delta > 0$. Our proof contains two steps: one for the Dirichlet problem (1.1) and the other for the Neumann problem (1.2).

Step 1. Effective boundary conditions for the Dirichlet problem (1.1).

Let $\xi \in C^\infty(\Omega_1\times[0,T])$ with $\xi = 0$ at $t = T$, and extend $\xi$ to the domain $\Omega\times(0,T)$ by defining
\[
\xi(x,t) =
\begin{cases}
\xi(x,t), & x \in \Omega_1,\\
\psi(s(x), r(x), t), & x \in \Omega_2,
\end{cases}
\]
where $\psi$ is the solution of the elliptic problem (4.1) and $\xi \in W^{1,1}_{2,0}(Q_T)$. By the weak convergence of $\{u\}_{\delta>0}$, it follows from Definition 3.1 that, as $\delta \to 0$,
\[
L[u,\xi] \longrightarrow L[v,\xi] = -\lim_{\delta\to 0}\int_0^T\!\!\int_{\Omega_2} \nabla\psi\cdot A\nabla u\,dx\,dt. \tag{4.14}
\]
In the curvilinear coordinates $(s,r)$, the right-hand side of (4.14) reads
\[
\mathrm{RHS} := -\int_0^T\!\!\int_{\Omega_2} \nabla\psi\cdot A\nabla u\,dx\,dt
= -\int_0^T\!\!\int_0^\delta\!\!\int_\Gamma (\sigma\psi_r u_r + \nabla_\Gamma\psi\cdot A\nabla_\Gamma u)
\]
\[
\quad - \int_0^T\!\!\int_0^\delta\!\!\int_\Gamma (\sigma\psi_r u_r + \nabla_\Gamma\psi\cdot A\nabla_\Gamma u)(2Hr + \kappa r^2)
- \int_0^T\!\!\int_0^\delta\!\!\int_\Gamma (\nabla_s\psi\cdot A\nabla_s u - \nabla_\Gamma\psi\cdot A\nabla_\Gamma u)(1 + 2Hr + \kappa r^2)
\]
\[
= \mathrm{I} + \mathrm{II} + \mathrm{III}. \tag{4.15}
\]
Multiplying (4.3) by $u$ and integrating by parts, we obtain
\[
\mathrm{I} := \int_0^\delta\!\!\int_0^{l_2}\!\!\int_0^{l_1} (\sigma\psi_r u_r + \mu_1\psi_{s_1}u_{s_1} + \mu_2\psi_{s_2}u_{s_2})\,ds_1\,ds_2\,dr
= -\int_0^{l_2}\!\!\int_0^{l_1} \sigma\psi_r(s,0,t)\,u\,ds_1\,ds_2. \tag{4.18}
\]
Subsequently, it follows from (4.3) and (4.5) that
\[
|\mathrm{II}| = O(\delta)\,\sqrt{T}\,(\sigma\mu_1)^{1/4}\big(\|\Psi^\delta_R(s,0)\|_{L^\infty(\mathbb{R}^2)}\big)^{1/2}, \tag{4.19}
\]
where Lemma 2.1 and the Hölder inequality were used. By virtue of (3.4) and (4.1), using a Taylor expansion of $g_{ij}(s,r)$, after a tedious calculation we get
\[
|\mathrm{III}| = O\Big(\delta\sqrt{\tfrac{\mu_1}{\mu_2}} + \delta^2\tfrac{\mu_1}{\mu_2}\Big)\int_0^T\Big(\int_\Gamma\!\!\int_0^\delta \sigma\psi_r^2 + \nabla_\Gamma\psi\cdot A\nabla_\Gamma\psi\Big)^{1/2} dt
= O\Big(\delta\sqrt{\tfrac{\mu_1}{\mu_2}} + \delta^2\tfrac{\mu_1}{\mu_2}\Big)\sqrt{T}\,(\sigma\mu_1)^{1/4}\,\|\Psi_R(s,0)\|^{1/2}_{L^\infty(\Gamma)}, \tag{4.20}
\]
where we have used Lemma 2.1. In the following, we consider three cases: (1) $\frac{\sigma}{\delta} \to 0$, (2) $\frac{\sigma}{\delta} \to \alpha \in (0,\infty)$, (3) $\frac{\sigma}{\delta} \to \infty$.
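For orientation (a restatement of the change of variables $r = R\sqrt{\sigma/\mu_1}$, not an additional estimate), the boundary flux appearing in (4.18) can be expressed through the rescaled function:
\[
\sigma\,\psi_r(s,0,t) = \sigma\sqrt{\tfrac{\mu_1}{\sigma}}\;\Psi^\delta_R(s,0) = \sqrt{\sigma\mu_1}\;\Psi^\delta_R(s,0).
\]
By (4.5), the term $\mathrm{I}$ is therefore of size $\frac{\sigma}{\delta}$ when $h_1 \to 0$ and of size $\sqrt{\sigma\mu_1}$ otherwise, which is why the case analysis below is organized according to the limits of $\frac{\sigma}{\delta}$ and $\sqrt{\sigma\mu_1}$.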
Case 1. $\frac{\sigma}{\delta} \to 0$ as $\delta \to 0$.

Subcase (1i): $\sigma\mu_1 \to 0$ as $\delta \to 0$. In view of (4.14)–(4.20), by the Hölder inequality we have
\[
|\mathrm{RHS}| \le C\int_0^T\Big(\int_0^\delta\!\!\int_0^{l_2}\!\!\int_0^{l_1}\big(\sigma\psi_r^2 + \mu_1\psi_{s_1}^2 + \mu_2\psi_{s_2}^2\big)\Big)^{1/2} dt \le C\sqrt{T}\,\max\Big\{\frac{\sigma}{\delta},\,\sqrt{\sigma\mu_1}\Big\},
\]
where (4.5) and Lemma 2.1 were used. Thus $L[v,\xi] = 0$, showing that $v$ satisfies the boundary condition $\frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$.

Subcase (1ii): $\sqrt{\sigma\mu_1} \to \gamma_1 \in (0,\infty)$ as $\delta \to 0$. In this case $h_1 \to \infty$. From (4.18), as $\delta \to 0$, if $c \in (0,1]$ we have
\[
\mathrm{I} = \sqrt{\sigma\mu_1}\int_0^{l_2}\!\!\int_0^{l_1} \Psi^\delta_R(s,0)\,u \ \longrightarrow\ \gamma_1\int_\Gamma v\,K^\infty_D[\xi].
\]
Otherwise, if $c = 0$, then $\mathrm{I} \to \gamma_1\int_\Gamma v\,\Lambda^\infty_D[\xi]$ as $\delta \to 0$, where $K^\infty_D[\xi]$ and $\Lambda^\infty_D[\xi]$ are defined via (4.7) and (4.10), respectively. Because of the assumption that $\delta\sqrt{\mu_1/\mu_2} \to 0$ as $\delta \to 0$, (4.19) and (4.20) give $|\mathrm{II}+\mathrm{III}| \to 0$ as $\delta \to 0$.
Hence, for $c \in (0,1]$, we obtain
\[
L[v,\xi] = \gamma_1\int_0^T\!\!\int_\Gamma v\,K^\infty_D[\xi],
\]
which means that $v$ satisfies $k\frac{\partial v}{\partial n} = \gamma_1 K^\infty_D[v]$ on $\Gamma\times(0,T)$; for $c = 0$, we have $L[v,\xi] = \gamma_1\int_0^T\!\!\int_\Gamma v\,\Lambda^\infty_D[\xi]$, which means that $v$ satisfies the boundary condition $k\frac{\partial v}{\partial n} = \gamma_1\Lambda^\infty_D[v]$ on $\Gamma\times(0,T)$.

Subcase (1iii): $\sigma\mu_1 \to \infty$ as $\delta \to 0$. In this case $h_1 \to \infty$ as $\delta \to 0$. Dividing both sides of (4.14) by $\sqrt{\sigma\mu_1}$, sending $\delta \to 0$ and combining (4.18)–(4.20), we obtain
\[
\int_0^T\!\!\int_\Gamma v\,K^\infty_D[g] = 0 \ \text{ for } c \in (0,1], \qquad \int_0^T\!\!\int_\Gamma v\,\Lambda^\infty_D[g] = 0 \ \text{ for } c = 0. \tag{4.21}
\]
In the case of $c \in (0,1]$, (4.21) yields $\nabla_\Gamma v = 0$. By an argument similar to Subcase (1iii) of Step 1 in the last section, $v$ satisfies the boundary condition $\int_\Gamma \frac{\partial v}{\partial n} = 0$.

In the case of $c = 0$, it follows from (4.11) and (4.21) that $v_{s_1} = 0$. From now on, choose the test function $\xi$ satisfying $\xi_{s_1} = 0$ on $\Gamma$. Let $\psi$ be a constant in $s_1$, with $\psi = \psi(s_2,r,t)$ defined by
\[
\sigma\psi_{rr} + \mu_2\psi_{s_2s_2} = 0 \ \text{in } \mathbb{R}\times(0,\delta), \qquad \psi(s_2,0,t) = g(s_2), \quad \psi(s_2,\delta,t) = 0, \tag{4.22}
\]
where $g(s_2) := \xi(s_2,0,t)$. The right-hand side of (4.14) now depends on the relationship among $\delta$, $\sigma$ and $\mu_2$. Continuing as in the last section, let $r = R\sqrt{\sigma/\mu_2}$ and $\Psi(s_2,R) = \psi(s_2,r,t)$. Substituting these into (4.22) leads to
\[
\Psi_{RR} + \Psi_{s_2s_2} = 0 \ \text{in } \mathbb{R}\times(0,h_2), \qquad \Psi(s_2,0) = g(s_2), \quad \Psi(s_2,h_2) = 0, \tag{4.23}
\]
where $h_2 = \delta\sqrt{\mu_2/\sigma}$. Moreover, define $D^{h_2}_D[g] := \Psi_R(s_2,0)$.
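By way of illustration (a computation analogous to (4.11), sketched here under the simplifying assumption that $g$ is $l_2$-periodic with zero mean; it is not recorded in the original, which only keeps the size estimate below), problem (4.23) can also be solved mode by mode: expanding $g$ in Fourier modes $e^{2\pi i n s_2/l_2}$ with coefficients $\hat g_n$, one finds
\[
D^{h_2}_D[g](s_2) = -\sum_{n\neq 0} \frac{2\pi |n|}{l_2}\coth\Big(\frac{2\pi |n|\, h_2}{l_2}\Big)\hat g_n\,e^{2\pi i n s_2/l_2},
\]
so as $h_2 \to \infty$ the operator formally tends to $-(-\partial_{s_2}^2)^{1/2}$, while for $h_2 \to 0$ each mode behaves like $-\hat g_n/h_2$, which after multiplication by $\sqrt{\sigma\mu_2}$ reproduces the $\sigma/\delta$ scaling recorded in (4.24) below.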
We estimate the size of $\Psi_R(s_2,0)$ as in (4.5), resulting in
\[
\sqrt{\sigma\mu_2}\,\|\Psi^\delta_R(s_2,0)\|_{L^\infty(\mathbb{R})} =
\begin{cases}
\dfrac{\sigma}{\delta}\big(-g(s_2) + O(h_2^2)\big), & \text{if } h_2 \to 0 \text{ as } \delta \to 0,\\[2mm]
O(\sqrt{\sigma\mu_2}), & \text{if } h_2 \in (0,\infty] \text{ as } \delta \to 0.
\end{cases} \tag{4.24}
\]
If $\sigma\mu_2 \to 0$ as $\delta \to 0$, then by an argument similar to Subcase (1i) we get $L[v,\xi] = 0$, showing that $v$ satisfies the boundary condition $\int_{\Gamma_1} \frac{\partial v}{\partial n} = 0$. If $\sqrt{\sigma\mu_2} \to \gamma_2 \in (0,\infty)$ as $\delta \to 0$, then by an argument similar to Subcase (1ii) we get $L[v,\xi] = \gamma_2\int_0^T\!\!\int_\Gamma v\,D^\infty_D[\xi]$, showing that $v$ satisfies $\int_{\Gamma_1}\big(k\frac{\partial v}{\partial n} - \gamma_2 D^\infty_D[v]\big) = 0$. If $\sigma\mu_2 \to \infty$ as $\delta \to 0$, then by an argument similar to Subcase (1iii) of Section 3.2 we have
\[
\int_0^T\!\!\int_\Gamma v\,D^\infty_D[g] = 0, \tag{4.25}
\]
which indicates that $v_{s_2} = 0$ on $\Gamma\times(0,T)$. Thus $v$ is a constant on $\Gamma$ in the spatial variables. Assume further that $\xi = m(t)$ on $\Gamma$ and $\psi(s,r,t) = (1 - r/\delta)\,m(t)$.
Using the same technique as in (3.28), we get $L[v,\xi] = 0$, from which $v$ satisfies $\int_\Gamma \frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$.

Case 2. $\frac{\sigma}{\delta} \to \alpha \in (0,\infty)$ as $\delta \to 0$.

Subcase (2i): $\sigma\mu_1 \to 0$. In this case $h_1 \to 0$. A combination of (4.15)–(4.20) gives
\[
L[v,\xi] = -\alpha\int_0^T\!\!\int_\Gamma v\,\xi,
\]
from which $v$ satisfies the boundary condition $k\frac{\partial v}{\partial n} = -\alpha v$ on $\Gamma\times(0,T)$.

Subcase (2ii): $\sqrt{\sigma\mu_1} \to \gamma_1 \in (0,\infty)$. As in Subcase (1ii), as $\delta \to 0$, if $c \in (0,1]$ we have $L[v,\xi] = \gamma_1\int_0^T\!\!\int_\Gamma v\,K^{\gamma_1/\alpha}_D[\xi]$, resulting in the boundary condition $k\frac{\partial v}{\partial n} = \gamma_1 K^{\gamma_1/\alpha}_D[v]$. On the other hand, if $c = 0$, we have $L[v,\xi] = \gamma_1\int_0^T\!\!\int_\Gamma v\,\Lambda^{\gamma_1/\alpha}_D[\xi]$, implying that $v$ satisfies $k\frac{\partial v}{\partial n} = \gamma_1\Lambda^{\gamma_1/\alpha}_D[v]$ on $\Gamma\times(0,T)$.

Subcase (2iii): $\sigma\mu_1 \to \infty$ as $\delta \to 0$. Following the proof of Subcase (1iii), we are led to
\[
\int_0^T\!\!\int_\Gamma v\,K^\infty_D[g] = 0 \ \text{ for } c \in (0,1], \qquad \int_0^T\!\!\int_\Gamma v\,\Lambda^\infty_D[g] = 0 \ \text{ for } c = 0.
\]
Therefore, if $c \in (0,1]$, then $v$ satisfies the boundary condition $\int_\Gamma \frac{\partial v}{\partial n} = 0$. On the other hand, if $c = 0$, then $v_{s_1} = 0$.
By further taking $\xi = \xi(s_2,r,t)$ and $\psi$ as defined in (4.22), and performing the procedure of Subcase (1iii), we arrive at the following results: if $\sigma\mu_2 \to 0$ as $\delta \to 0$, then $\int_{\Gamma_1} \frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$; if $\sqrt{\sigma\mu_2} \to \gamma_2 \in (0,\infty)$ as $\delta \to 0$, then $\int_{\Gamma_1}\big(k\frac{\partial v}{\partial n} - \gamma_2 D^\infty_D[v]\big) = 0$; if $\sigma\mu_2 \to \infty$ as $\delta \to 0$, then $\nabla_\Gamma v = 0$ and $\int_\Gamma\big(k\frac{\partial v}{\partial n} + \alpha v\big) = 0$ on $\Gamma\times(0,T)$.

Case 3. $\frac{\sigma}{\delta} \to \infty$ as $\delta \to 0$.

Subcase (3i): $\sqrt{\sigma\mu_1} \to \gamma_1 \in [0,\infty)$. In this case $h_1 \to 0$. In view of (4.15)–(4.20), dividing both sides of (4.14) by $\sigma/\delta$ and sending $\delta \to 0$, we get $\int_0^T\!\!\int_\Gamma v\,\xi = 0$, from which $v$ satisfies the boundary condition $v = 0$ on $\Gamma\times(0,T)$.

Subcase (3ii): $\sigma\mu_1 \to \infty$ as $\delta \to 0$. For the case of $c \in (0,1]$, using an argument similar to Subcase (3ii) of Section 3.2, we have $v = 0$. On the other hand, consider the case $c = 0$ with $h_1 \to H \in [0,\infty]$ as $\delta \to 0$.
In view of (4.14)–(4.20) and (4.10), if $H = 0$, then $v$ satisfies the boundary condition $v = 0$. Otherwise, if $H \in (0,\infty]$, we obtain $\int_0^T\!\!\int_\Gamma v\,\Lambda^H_D[\xi] = 0$, showing that $v_{s_1} = 0$. Again, by taking $\xi = \xi(s_2,r,t)$ and $\psi$ as defined in (4.22), and performing the procedure of Subcase (1iii), we have $v = 0$ on $\Gamma\times(0,T)$.

Step 2. Effective boundary conditions for the Neumann problem (1.2).

Let $\xi \in C^\infty(\Omega_1\times[0,T])$ with $\xi = 0$ at $t = T$. We extend $\xi$ to the domain $\Omega\times(0,T)$ by defining
\[
\xi(x,t) =
\begin{cases}
\xi(x,t), & x \in \Omega_1,\\
\phi(s(x), r(x), t), & x \in \Omega_2,
\end{cases}
\]
where $\phi$ is the unique solution of (4.2) and $\xi \in W^{1,1}_2(Q_T)$. Due to the weak convergence of $u \to v$ as $\delta \to 0$, it follows from Definition 2.1 that
\[
L[u,\xi] = -\int_0^T\!\!\int_{\Omega_2} \nabla\phi\cdot A\nabla u\,dx\,dt \ \longrightarrow\ L[v,\xi] = -\lim_{\delta\to 0}\int_0^T\!\!\int_{\Omega_2} \nabla\phi\cdot A\nabla u\,dx\,dt. \tag{4.26}
\]
In the curvilinear coordinates, the right-hand side of (4.26) can be rewritten as $\mathrm{RHS} := -\int_0^T\!\!\int_{\Omega_2} \nabla\phi\cdot A\nabla u\,dx\,dt = \mathrm{I} + \mathrm{II} + \mathrm{III}$, where
\[
\mathrm{I} := -\int_0^T\!\!\int_0^\delta\!\!\int_\Gamma (\sigma\phi_r u_r + \nabla_\Gamma\phi\cdot A\nabla u)\,ds\,dr\,dt = -\int_0^T\!\!\int_0^{l_2}\!\!\int_0^{l_1} \sigma\phi_r(s,0,t)\,u\,ds_1\,ds_2\,dt, \tag{4.27}
\]
\[
|\mathrm{II}| = O(\delta)\int_0^T\Big(\int_\Gamma\!\!\int_0^\delta \sigma\phi_r^2 + \nabla_\Gamma\phi\cdot A\nabla_\Gamma\phi\Big)^{1/2} dt = O(\delta)\,\sqrt{T}\,(\sigma\mu_1)^{1/4}\big(\|\Phi^\delta_R(s,0)\|_{L^\infty(\Gamma)}\big)^{1/2}, \tag{4.28}
\]
and
\[
|\mathrm{III}| = O\Big(\delta\sqrt{\tfrac{\mu_1}{\mu_2}} + \delta^2\tfrac{\mu_1}{\mu_2}\Big)\sqrt{T}\,(\sigma\mu_1)^{1/4}\,\|\Phi^\delta_R(s,0)\|^{1/2}_{L^\infty(\Gamma)}. \tag{4.29}
\]
Next, we consider the following cases: (1) $\sigma\mu_1 \to 0$, (2) $\sqrt{\sigma\mu_1} \to \gamma_1 \in (0,\infty)$, (3) $\sigma\mu_1 \to \infty$.

Case 1. $\sigma\mu_1 \to 0$ as $\delta \to 0$. Using the Hölder inequality and (4.2), we get
\[
|\mathrm{RHS}| = O(1)\int_0^T\Big(\int_0^\delta\!\!\int_\Gamma \sigma\phi_r^2 + \nabla_\Gamma\phi\cdot A\nabla_\Gamma\phi\Big)^{1/2}\Big(\int_\Omega \nabla u\cdot A\nabla u\,dx\Big)^{1/2} dt = O(\sqrt{T})\,(\sigma\mu_1)^{1/4},
\]
where (4.6) and Lemma 2.1 were used. Thus $L[v,\xi] = 0$, showing that $v$ satisfies the boundary condition $\frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$.

Case 2. $\sqrt{\sigma\mu_1} \to \gamma_1 \in (0,\infty)$ as $\delta \to 0$.

Subcase (2i): $\mu_1\delta \to 0$ as $\delta \to 0$. In this case $h_1 = \mu_1\delta/\sqrt{\sigma\mu_1} \to 0$. Thanks to (4.28)–(4.29), we obtain $\mathrm{I} \to 0$ and $|\mathrm{II} + \mathrm{III}| \to 0$, where (4.6) was used. It follows that $L[v,\xi] = 0$, showing that $v$ satisfies the effective boundary condition $\frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$.
Subcase (2ii): $\mu_1\delta \to \beta_1 \in (0,\infty]$ as $\delta \to 0$. In this case $h_1 \to \beta_1/\gamma_1 \in (0,\infty]$. In view of (4.28)–(4.29) and (4.6), we are led to
\[
L[v,\xi] = \gamma_1\int_0^T\!\!\int_\Gamma v\,K^{\beta_1/\gamma_1}_N[\xi] \ \text{ for } c \in (0,1], \qquad L[v,\xi] = \gamma_1\int_0^T\!\!\int_\Gamma v\,\Lambda^{\beta_1/\gamma_1}_N[\xi] \ \text{ for } c = 0,
\]
from which $v$ satisfies, on $\Gamma\times(0,T)$, the boundary condition $k\frac{\partial v}{\partial n} = \gamma_1 K^{\beta_1/\gamma_1}_N[v]$ for $c \in (0,1]$ and $k\frac{\partial v}{\partial n} = \gamma_1\Lambda^{\beta_1/\gamma_1}_N[v]$ for $c = 0$.

Case 3. $\sigma\mu_1 \to \infty$ as $\delta \to 0$.

Subcase (3i): $\mu_1\delta \to \beta_1 \in [0,\infty)$. In this case $h_1 \to \beta_1/\gamma_1 \in [0,\infty)$. Combining (4.28)–(4.29) and (4.6), we have
\[
\mathrm{I} \to \beta_1\int_0^T\!\!\int_\Gamma \widetilde\Delta_\Gamma\xi(s,0,t)\,v(s,0,t), \qquad |\mathrm{II} + \mathrm{III}| \to 0, \quad \text{as } \delta \to 0.
\]
Thus, for $c \in (0,1]$, we arrive at $L[v,\xi] = \beta_1\int_0^T\!\!\int_\Gamma v\,\widetilde\Delta_\Gamma\xi$, from which $v$ satisfies the effective boundary condition $k\frac{\partial v}{\partial n} = \beta_1\widetilde\Delta_\Gamma v$. For $c = 0$, we have $L[v,\xi] = \beta_1\int_0^T\!\!\int_\Gamma v\,\frac{\partial^2\xi}{\partial\boldsymbol{\tau}_1^2}$, from which $v$ satisfies the boundary condition $k\frac{\partial v}{\partial n} = \beta_1\frac{\partial^2 v}{\partial\boldsymbol{\tau}_1^2}$.
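A brief remark, added here for readability rather than taken from the original text: the identity $L[v,\xi] = \beta_1\int_0^T\!\!\int_\Gamma v\,\widetilde\Delta_\Gamma\xi$ is consistent with Definition 4.1(3), since integrating by parts on the periodic surface $\Gamma$ (no boundary terms arise) gives
\[
\int_\Gamma v\,\widetilde\Delta_\Gamma\xi\,ds = -\int_\Gamma \Big(\frac{\partial v}{\partial\boldsymbol{\tau}_1}\frac{\partial\xi}{\partial\boldsymbol{\tau}_1} + c\,\frac{\partial v}{\partial\boldsymbol{\tau}_2}\frac{\partial\xi}{\partial\boldsymbol{\tau}_2}\Big)\,ds,
\]
provided the trace of $v$ lies in $H^1(\Gamma)$, as required there.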
Subcase (3ii): $\mu_1\delta \to \infty$ as $\delta \to 0$. In this case, $h_1 \to H \in [0,\infty]$ after passing to a subsequence. We first consider the case of $c \in (0,1]$. If $H = 0$, we have
\[
\int_0^T\!\!\int_\Gamma \widetilde\Delta_\Gamma\xi(s,0,t)\,v\,ds\,dt = 0,
\]
leading to $v(\cdot) = m(t)$ on $\Gamma$ for almost every $t \in (0,T)$. If $H \in (0,\infty]$, we have $\int_0^T\!\!\int_\Gamma v\,K^H_N[\xi] = 0$, implying that $v(\cdot) = m(t)$ on $\Gamma$ for almost every $t \in (0,T)$. Then we choose a special test function $\xi = \xi(t)$ on $\Gamma$ with a constant extension in $\Omega_2$ such that $L[v,\xi] = 0$, which shows that $v$ satisfies the boundary condition $\int_\Gamma \frac{\partial v}{\partial n} = 0$.

For the case of $c = 0$: if $H = 0$, we have $\int_0^T\!\!\int_\Gamma v\,\frac{\partial^2\xi}{\partial\boldsymbol{\tau}_1^2}\,ds\,dt = 0$, implying that $v(\cdot) = v(s_2,t)$ on $\Gamma$ for almost every $t \in (0,T)$; if $H \in (0,\infty]$, we have $\int_0^T\!\!\int_\Gamma v\,K^H_N[\xi] = 0$, implying that $v(\cdot) = v(s_2,t)$ on $\Gamma$ for almost every $t \in (0,T)$. We proceed by taking a test function $\xi$ satisfying $\xi_{s_1} = 0$ on $\Gamma$. Furthermore, let $\phi = \phi(s_2,r,t)$ be defined by
\[
\sigma\phi_{rr} + \mu_2\phi_{s_2s_2} = 0 \ \text{in } \mathbb{R}\times(0,\delta), \qquad \phi(s_2,0,t) = g(s_2), \quad \phi_r(s_2,\delta,t) = 0, \tag{4.30}
\]
where $g(s_2) := \xi(p(s_2),0,t)$. Let $r = R\sqrt{\sigma/\mu_2}$ and $\Phi(s_2,R) = \phi(s_2,r,t)$. Substituting these into (4.30) gives
\[
\Phi_{RR} + \Phi_{s_2s_2} = 0 \ \text{in } \mathbb{R}\times(0,h_2), \qquad \Phi(s_2,0) = g(s_2), \quad \Phi_R(s_2,h_2) = 0, \tag{4.31}
\]
where $h_2 = \delta\sqrt{\mu_2/\sigma}$. Moreover, define $D^{h_2}_N[g] := \Phi_R(s_2,0)$.
Estimating the size of $\Phi_R(s_2,0)$ as in (4.6), we have
\[
\sqrt{\sigma\mu_2}\,\|\Phi_R(s_2,0)\|_{L^\infty(\Gamma)} =
\begin{cases}
\mu_2\delta\big(\xi_{s_2s_2}(s,0,t) + O(h_2^2)\big), & \text{if } h_2 \to 0 \text{ as } \delta \to 0,\\[2mm]
O(\sqrt{\sigma\mu_2}), & \text{if } h_2 \in (0,\infty] \text{ as } \delta \to 0.
\end{cases} \tag{4.32}
\]
From now on, with $\phi = \phi(s_2,r,t)$ given in $\Omega_2$, what follows focuses on the relationships among $\delta$, $\sigma$ and $\mu_2$. We consider the cases (a) $\sigma\mu_2 \to 0$, (b) $\sqrt{\sigma\mu_2} \to \gamma_2 \in (0,\infty)$, (c) $\sigma\mu_2 \to \infty$.

Subcase (3iia): $\sigma\mu_2 \to 0$ as $\delta \to 0$. As in Case 1, we have $L[v,\xi] = 0$, showing that $v$ satisfies the boundary condition $\int_{\Gamma_1} \frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$.

Subcase (3iib): $\sqrt{\sigma\mu_2} \to \gamma_2 \in (0,\infty)$ as $\delta \to 0$. Assume first that $\mu_2\delta \to 0$; in this case $h_2 \to 0$ as $\delta \to 0$, and by an argument similar to Case 2 we have $L[v,\xi] = 0$, so $v$ satisfies the effective boundary condition $\int_{\Gamma_1} \frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$. On the other hand, if $\mu_2\delta \to \beta_2 \in (0,\infty]$, then by (4.28)–(4.29) and (4.6) we are led to $L[v,\xi] = \gamma_2\int_0^T\!\!\int_\Gamma v\,D^{\beta_2/\gamma_2}_N[\xi]$, from which $v$ satisfies the boundary condition $\int_{\Gamma_1}\big(k\frac{\partial v}{\partial n} - \gamma_2 D^{\beta_2/\gamma_2}_N[v]\big) = 0$ on $\Gamma\times(0,T)$.
Subcase (3iic): $\sigma\mu_2 \to \infty$ as $\delta \to 0$. Assume first that $\mu_2\delta \to \beta_2 \in [0,\infty)$; in this case $h_2 \to 0$. By virtue of (4.28)–(4.29) and (4.6), we get
\[
L[v,\xi] = \beta_2\int_0^T\!\!\int_\Gamma v\,\frac{\partial^2\xi}{\partial\boldsymbol{\tau}_2^2},
\]
from which $v$ satisfies $\int_{\Gamma_1}\big(k\frac{\partial v}{\partial n} - \beta_2\frac{\partial^2 v}{\partial\boldsymbol{\tau}_2^2}\big) = 0$ on $\Gamma\times(0,T)$. If $\mu_2\delta \to \infty$, then $h_2 \to H \in [0,\infty]$ after passing to a subsequence. If $H = 0$, then dividing both sides of (4.26) by $\mu_2\delta$ and sending $\delta \to 0$, we obtain $\int_0^T\!\!\int_\Gamma v\,\frac{\partial^2\xi}{\partial\boldsymbol{\tau}_2^2} = 0$, implying that $v(\cdot) = m(t)$ on $\Gamma$ for almost every $t \in (0,T)$. If $H \in (0,\infty]$, then dividing both sides of (4.26) by $\sqrt{\sigma\mu_2}$ and sending $\delta \to 0$, we obtain $\int_0^T\!\!\int_\Gamma v\,D^H_N[\xi] = 0$, implying that $v(\cdot) = m(t)$ on $\Gamma$ for almost every $t \in (0,T)$. Therefore, by taking a special test function $\xi = \xi(t)$ on $\Gamma$ and using the constant extension $\xi = \xi(t)$ in $\Omega_2$, we obtain $L[v,\xi] = 0$, implying that $v$ satisfies the boundary condition $\int_\Gamma \frac{\partial v}{\partial n} = 0$ on $\Gamma\times(0,T)$. This completes the whole proof.

Acknowledgments

The author is indebted to his advisor, Professor Xuefeng Wang, for his guidance, and to Dr. Yantao Wang for helpful discussions.

References
diff --git a/MNFLT4oBgHgl3EQfMy8Y/content/tmp_files/2301.12017v1.pdf.txt b/MNFLT4oBgHgl3EQfMy8Y/content/tmp_files/2301.12017v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1a71d286806f1fb1f2c60bff518c8c4ab0230797
--- /dev/null
+++ b/MNFLT4oBgHgl3EQfMy8Y/content/tmp_files/2301.12017v1.pdf.txt
@@ -0,0 +1,1624 @@
Understanding INT4 Quantization for Transformer Models: Latency Speedup, Composability, and Failure Cases
Xiaoxia Wu∗, Cheng Li∗, Reza Yazdani Aminabadi, Zhewei Yao, Yuxiong He
Microsoft
{xiaoxiawu, chengli1, yazdani.reza, zheweiyao, yuxhe}@microsoft.com

Abstract
Improving the deployment efficiency of transformer-based language models has been challenging given their high computation and memory cost. While INT8 quantization has recently been shown to be effective in reducing both the memory cost and latency while preserving model accuracy, it remains unclear whether we can leverage INT4 (which doubles peak hardware throughput) to achieve further latency improvement. In this work, we fully investigate the feasibility of using INT4 quantization for language models, and show that using INT4 introduces no or negligible accuracy degradation for encoder-only and encoder-decoder models, but causes a significant accuracy drop for decoder-only models. To materialize the performance gain using INT4, we develop a highly-optimized end-to-end INT4 encoder inference pipeline supporting different quantization strategies. Our INT4 pipeline is 8.5× faster for latency-oriented scenarios and up to 3× for throughput-oriented scenarios compared to the inference of FP16, and improves the SOTA BERT INT8 performance from FasterTransformer by up to 1.7×. We also provide insights into the failure cases when applying INT4 to decoder-only models, and further explore the compatibility of INT4 quantization with other compression techniques, like pruning and layer reduction.

1 Introduction
As pre-trained large language models (LLMs) (Vaswani et al., 2017) such as BERT (Tenney et al., 2019), BART Lewis et al.
(2020), and GPT (Radford et al., 2019) require a significant amount of GPU resources to deploy, compression becomes a common practice to optimize model inference, especially for resource-constrained environments. One of the widely used compression techniques is quantization, where data are stored and manipulated in a lower-precision format, such as 8-bit or 4-bit integers instead of 32-bit or 16-bit floating-point numbers. It not only reduces the amount of memory required to store the model, but can also leverage the higher GEMM computation throughput of lower-bit data types on supported GPUs (e.g., peak INT4 Tensor Core TFLOPS doubles that of INT8 and quadruples that of FP16) to improve inference latency. Note that only quantizing the model weights without computing in lower-bit data types (i.e., keeping activations in FP16 or FP32) introduces no latency improvement (it can even be slower due to type conversion at runtime) but only memory saving.

Recent work proposes techniques to apply INT8 quantization (using INT8 computation where both weight and activation are quantized, referred to as W8A8) to all linear layers without introducing accuracy degradation for transformers (Yao et al., 2022; Xiao et al., 2022; Dettmers et al., 2022a,b; Li et al., 2022; Kim et al., 2021). Yao et al. (2022) also present an INT8 inference pipeline and show good end-to-end (E2E) performance improvement over FP16 model inference. NVIDIA's FasterTransformer (NVIDIA, 2023) holds SOTA open-source INT8 implementations where aggressive quantization is explored: mode-1 quantizes the attention computation beyond linear layers, and mode-2 further quantizes the residual connection, trading off accuracy for latency.

∗Equal Contribution. Code will be released soon as a part of https://github.com/microsoft/DeepSpeed

While we are advancing W8A8 quantization algorithms and implementations proven to be effective for LLMs, two questions arise: (1) whether INT4 inference (using INT4 computation where both activation and weight are quantized, referred to as W4A4) is feasible (i.e., has an acceptable accuracy drop) for these models, and (2) how it can be leveraged for performance improvement on real hardware. Although W4A4 has been successfully applied to other model types or hardware, e.g., convolution models for image classification with a quantization-aware training (QAT) strategy (Abdolrashidi et al., 2021),1 there is a lack of work on exploring W4A4 for LLM inference on GPU. Dettmers and Zettlemoyer (2022) show little accuracy loss for LLMs when only the model weights are quantized to 4-bit with post-training quantization (PTQ)2, while the computation is still in FP16 as the activations are not quantized. Wu et al. (2022) prove that even a binary network can result in only a small degradation if applying QAT with knowledge distillation (KD) (Hinton et al., 2014) and longer training, but the activations are quantized to INT8 (using INT8 computation, not INT4). Tang et al. (2022) are the first to claim to apply W4A4 to BERT for inference with QAT and KD. However, their quantization method fails to enable W4A4 for all but the last two layers in a four-layer TinyBERT model (otherwise causing drastic accuracy drops). Moreover, their E2E INT4 inference lacks implementation details, with conflicting performance numbers when compared to FasterTransformer (see Appendix C.2).
In this work, we aim not only to better understand the accuracy impact of INT4 quantization on common LLMs, but also to materialize and maximize the benefit of using INT4 computation in E2E inference, further improving the SOTA inference performance on LLMs. Specifically, we make the following contributions:
• We explore the feasibility of W4A4 quantization across popular language model types, by leveraging the recent layer-wise knowledge distillation method for quantization. We show that our W4A4 can achieve no accuracy loss for encoder-only models (BERT) on classification problems and a negligible accuracy difference for encoder-decoder models (BART) on summarization tasks, but causes a relatively larger accuracy drop for decoder-only models (GPT) on autoregressive generation tasks.
• We develop a highly optimized end-to-end encoder model inference pipeline to support INT4 computation. The pipeline is built with modular components supporting different quantization strategies to accommodate latency- or throughput-oriented scenarios. Our inference pipeline is up to 8.5×/3× faster for latency-/throughput-oriented scenarios when compared to the HuggingFace FP16 BERT implementation, and improves the SOTA BERT INT8 performance from NVIDIA FasterTransformer by up to 1.7×.
• To unveil the causes of the larger accuracy drop for decoder-only models (GPT) when using INT4 quantization, we provide an in-depth analysis of layer normalization, the pretraining effect, and the attention mechanism. Additionally, we study the composability of INT4 quantization with other compression techniques, including pruning and layer reduction, for encoder-related models.
We defer additional related work to Appendix A.

1 QAT requires the full training pipeline: the weights and activations are quantized during the forward pass and the weights are updated with gradients computed by the straight-through estimator (Bengio et al., 2013) or other methods.
2 PTQ means the quantized model is arrived at directly by mapping the weights from floating-point to low-precision values, without the full training pipeline (dataset and backward gradients).

2 Model Accuracy for INT4 Quantization

2.1 Quantization Algorithms and Training
Quantization. For completeness, we here explain the symmetric and asymmetric quantization algorithms (Yao et al., 2022). Suppose x ∈ R^d and x_q ∈ R^d represent respectively a full-precision and a quantized vector. The uniform symmetric mapping strategy from x to x_q is

    x_q^(sym) = S ⌈clamp(x/S; −2^(b−1), 2^(b−1) − 1)⌉,

where clamp restricts the value of its argument to the range from −2^(b−1) to 2^(b−1) − 1, b is the number of bits used to represent the quantized value, ⌈·⌉ is the rounding operator, and S ∈ R is the scaling factor. For example, S can be computed as the maximum of the absolute elements of x, i.e., S = max(abs(x)). On the other hand, the asymmetric mapping strategy can be expressed as

    x_q^(asym) = S ⌈clamp((x − x_zero·1)/S; 0, 2^(b−1) − 1)⌉ + x_zero·1,

where x_zero is used as a reference point, potentially reducing any bias in the asymmetric vector, and 1 denotes the all-ones vector. The scalar S can be computed as S = max(x) − min(x), with x_zero = min(x). Throughout the paper, we always do both weight and activation quantization using the method proposed in Yao et al. (2022). See Appendix B for more details.
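To make the two mappings concrete, below is a minimal PyTorch-style sketch of per-tensor symmetric and asymmetric fake quantization. This is our illustration, not the authors' released implementation (which uses per-group scaling and fused GPU kernels); the scale here is normalized by the integer range and the asymmetric grid uses the full unsigned range, which are common conventions.

    import torch

    def sym_quant_dequant(x: torch.Tensor, b: int = 4) -> torch.Tensor:
        # Symmetric mapping: scale so that max|x| lands on the largest integer level.
        qmin, qmax = -(2 ** (b - 1)), 2 ** (b - 1) - 1
        S = x.abs().max().clamp_min(1e-8) / qmax
        q = torch.clamp(torch.round(x / S), qmin, qmax)
        return S * q  # dequantized ("fake-quantized") tensor

    def asym_quant_dequant(x: torch.Tensor, b: int = 4) -> torch.Tensor:
        # Asymmetric mapping: shift by x_zero = min(x), then scale (max - min)
        # onto the unsigned grid [0, 2^b - 1].
        levels = 2 ** b - 1
        x_zero = x.min()
        S = ((x.max() - x_zero) / levels).clamp_min(1e-8)
        q = torch.clamp(torch.round((x - x_zero) / S), 0, levels)
        return S * q + x_zero

During QAT, such fake-quantized tensors replace the full-precision weights and activations in the forward pass, while gradients flow through the rounding via the straight-through estimator mentioned in footnote 1.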
Knowledge Distillation. Knowledge distillation (KD) can greatly improve the performance of quantized transformer models. It trains a smaller quantized model (the student model) by incorporating the knowledge from the larger full-precision model (the teacher model). This can be done by training the student model to mimic the behavior of the teacher model on the training dataset, using the output probabilities as a soft target (Hinton et al., 2014) and the hidden states (and/or attention maps) of each transformer layer to align feature maps (Jiao et al., 2019; Wang et al., 2020; Bai et al., 2020; Li et al., 2016a; Wu et al., 2022).

Table 1: The best quality for BERT/BART/GPT-type models (two sizes each) over the validation datasets, respectively with metric Accuracy (Acc., higher is better), Rouge (higher is better), and perplexity (PPL, lower is better).

Models            | BERT-base (110M)          | BART-base (140M)                        | GPT2-base (117M)
Tasks             | MNLI-m/mm   | QQP         | CNNDailyMail      | XSUM                | PTB        | WIKI-2     | WIKI-103
Metrics           | Acc/Acc     | F1/Acc      | R1/R2/RLsum       | R1/R2/RL            | Perplexity | Perplexity | Perplexity
FP32 (teacher)    | 84.20/84.67 | 87.83/90.95 | 45.62/22.85/42.87 | 42.18/19.44/34.36   | 19.31      | 21.02      | 17.46
W4A4 (symmetric)  | 84.31/84.48 | 88.11/91.14 | 44.63/21.42/41.92 | 41.54/18.61/33.69   | 22.17      | 27.28      | 21.75
W4A4 (asymmetric) | 84.29/84.65 | 88.17/91.19 | 44.83/21.67/42.08 | 41.53/18.56/33.62   | 21.72      | 25.99      | 21.54

Models            | BERT-large (345M)         | BART-large (406M)                       | GPT2-medium (355M)
Tasks             | MNLI-m/mm   | QQP         | CNNDailyMail      | XSUM                | PTB        | WIKI-2     | WIKI-103
Metrics           | Acc/Acc     | F1/Acc      | R1/R2/RLsum       | R1/R2/RL            | Perplexity | Perplexity | Perplexity
FP32 (teacher)    | 86.65/85.91 | 88.08/91.07 | 44.82/21.67/41.80 | 45.42/22.37/37.29   | 15.92      | 15.92      | 12.75
W4A4 (symmetric)  | 86.25/86.20 | 88.30/91.17 | 45.12/21.73/42.31 | 44.39/21.28/36.33   | 17.69      | 19.51      | 14.57
W4A4 (asymmetric) | 86.49/86.28 | 88.35/91.24 | 45.20/21.85/42.40 | 44.91/21.74/36.79   | 17.32      | 18.74      | 14.23

2.2 INT4 Quantization for Language Models
We perform the 4-bit quantization on all linear layers using QAT and KD. We use BERT-base and BERT-large (Tenney et al., 2019) as representatives of encoder-only models and fine-tune them on the two largest GLUE tasks, i.e., QQP (Iyer et al., 2017) and MNLI (Williams et al., 2017), for small accuracy variations. We use GPT2 and GPT2-medium (Radford et al., 2019) as representatives of decoder-only models and fine-tune them on three causal generation tasks, i.e., PTB (Marcinkiewicz, 1994), Wikitext-2, and Wikitext-103 (Merity et al., 2017). Finally, we use BART-base and BART-large as representatives of encoder-decoder models and fine-tune them on two summarization tasks, i.e., CNNDailyMail (Hermann et al., 2015) and XSum (Narayan et al., 2018). In order to reduce the effect of hyper-parameters, e.g., the best quantization configuration for BERT may be suboptimal for GPT, we exhaustively search hyper-parameters including iterations, learning rate, dropout, quantization groups, clip values, and knowledge distillation terms for each model and choose the best one to report here. We include the experimental details in Appendix C and Table C.1.
We present the main results in Table 1 for both symmetric and asymmetric quantization. We also provide more detailed iteration-vs-accuracy plots in Figure C.1 on the validation datasets for QAT. For symmetric quantization, as can be seen, there is no accuracy degradation for BERT models and negligible drops (≤ 1 point) for BART models, while the 4-bit decoder models, i.e., GPT2 and GPT2-medium, show a significant drop in perplexity (≥ 1.5 points) compared to the original FP32 models.
This suggests that classification/summarization tasks using encoder-only/encoder-decoder models are much more robust to quantization when compared to auto-regressive generation tasks using decoder-only models.

Figure 1: CUTLASS INT4 vs. INT8 GEMM performance comparison across different batch size×sequence length (M) for BERT-base and BERT-large GEMM shapes (N and K). We use the best GEMM schedule for different inputs identified with the CUTLASS profiler. The left axis shows the throughput achieved (peak INT8 and INT4 Tensor TOPS are 309.7 and 619.3 TFLOPS on an A6000 GPU) and the right axis shows the speedup of INT4 over INT8.

Asymmetric quantization generally improves the accuracy performance over symmetric quantization since it better utilizes the quantization range. One notable thing is that even with a better quantization scheme (i.e., asymmetric quantization) and exhaustive hyper-parameter tuning, decoder-only models still have larger quality degradation compared to encoder-only and encoder-decoder models. To provide more insight into why decoder-only models are more sensitive to INT4 quantization, we give a detailed analysis in Section 4.

3 Highly Optimized INT4 Encoder Inference
To materialize and maximize the benefits of using INT4 computation in model inference, we develop a set of custom GPU kernels and an E2E highly optimized pipeline to support inference with INT4 (as well as INT8) quantized encoder models. We adopt the system optimizations described in (Yao et al., 2022) and (Aminabadi et al., 2022) when applicable, and take advantage of FlashAttention (Dao et al., 2022) and the CUDA graph (NVIDIA, 2021) to further improve the performance. Moreover, we explore different quantization strategies for latency- or throughput-oriented scenarios. The software design and implementation also largely apply to other model types, e.g., GPT decoders, if the accuracy drop can be resolved.
We conduct the performance experiments on a Lambda A6000 workstation (Lambda, 2023) (2×A6000-48GB GPUs, 256GB DRAM, and 2TB NVMe), with the following software setup: HuggingFace transformers 4.25.1, NVIDIA FasterTransformer v5.2.1, PyTorch 1.12.1, CUDA 11.7, and CUTLASS v2.6.0. Currently, INT4 GEMM is not supported by cuBLAS and is only available through CUTLASS (NVIDIA, 2017), which we use to support the INT4 computation in model inference.

3.1 INT4 GEMM
INT4 Tensor Core performance (peak TFLOPS) theoretically doubles INT8 throughput on supported NVIDIA GPUs. However, to achieve the 2× speedup, the GEMM input shapes have to be large enough (being compute-intensive). The linear layers that are quantized and computed with INT4 data in the encoder model inference are the QKV projection, attention output, MLP intermediate, and MLP output GEMMs. The GEMM shapes (M-N-K) for these layers are (bs×seq, 3h, h), (bs×seq, h, h), (bs×seq, 4h, h) and (bs×seq, h, 4h) respectively, where bs and seq are the input batch size and sequence length, and h is the model hidden dimension. These shapes set the upper-bound performance improvement we can achieve with INT4 over INT8 GEMM for a given model.
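For quick reference, a small helper (illustrative only, not part of the released pipeline) that enumerates the four quantized GEMM shapes for a given batch size, sequence length, and hidden dimension:

    def encoder_gemm_shapes(bs: int, seq: int, h: int):
        """Return (M, N, K) for the four quantized linear layers of one encoder layer."""
        m = bs * seq
        return {
            "qkv_projection":   (m, 3 * h, h),
            "attention_output": (m, h, h),
            "mlp_intermediate": (m, 4 * h, h),
            "mlp_output":       (m, h, 4 * h),
        }

    # Example: BERT-large (h = 1024) with batch size 32 and sequence length 384,
    # so M = bs * seq = 12288 and the MLP output GEMM is (12288, 1024, 4096).
    print(encoder_gemm_shapes(32, 384, 1024))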
Figure 1 shows the performance comparison between INT4 and INT8 GEMM for common shapes in the BERT-base and BERT-large models. We can see that the larger the input shape, the higher the speedup. While the INT4 GEMM speedups for BERT-large are overall higher than for BERT-base as the model hidden dimension is larger (1024 vs. 768), within a model the four GEMMs can have very different achieved INT4 speedups given the same input, i.e., bs × seq. For example, with bs × seq = 12288 for BERT-large, the attention output GEMM (12288-h-h) only achieves a 1.46× speedup while the MLP output GEMM (12288-h-4h) achieves 1.96× when using INT4 over INT8 computation. Combined with the quantization/dequantization overhead (see Section 3.2), this difference suggests the need for tunable quantization strategies (enabling/disabling quantization on certain GEMM parts) depending on the input shape.

3.2 Holistic Optimizations of End-to-end Inference

Figure 2: E2E latency speedup of (a) our INT4 over INT8 with all four parts quantized (i4-qall and i8-qall), and (b) our INT4 with the best quantization strategy (i4-qbest) over FasterTransformer INT8 (FT-i8) on A6000.

Figure 3: E2E latency speedup of FasterTransformer INT8 (FT-i8), our INT8 with all quantization (i8-qall), and our INT4 with the best quantization strategy (i4-qbest) over HuggingFace FP16 (HF-fp16) inference.

While INT4 computation introduces performance improvement for the linear layers, there are other major components in between using FP16 data types (e.g., layer normalization, elementwise operations, etc.). The E2E inference requires quantizing/dequantizing the activations before/after the lower-bit GEMM operations. Moreover, the improvement from INT4 and the quantization/dequantization overhead are both model- and input-dependent. Depending on the deployment scenario (latency- or throughput-oriented), the optimal quantization strategies can be different. Thus, maximizing the gain from using INT4 computation requires holistic optimizations of the E2E model inference.
The quantization/dequantization of activations are memory-bound operations and introduce nontrivial overhead. Similar to Yao et al. (2022), we fuse the quantization operation for an activation with its preceding element-bias-add, GELU, or layer normalization operation into a single GPU kernel, and fuse the dequantization operation with the INT4 GEMM kernel to avoid extra data movement to global GPU memory. Since current PyTorch does not support an INT4 tensor data type yet, we pack INT4 data into INT8 tensors when invoking our customized kernels.
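A minimal sketch of such packing and unpacking (our illustration; the actual kernels handle this on the GPU rather than in Python):

    import torch

    def pack_int4(x: torch.Tensor) -> torch.Tensor:
        # x: int8 tensor whose values already fit in [-8, 7]; last dim must be even.
        lo = (x[..., 0::2] & 0xF).to(torch.uint8)   # low nibble (two's complement)
        hi = (x[..., 1::2] & 0xF).to(torch.uint8)   # high nibble
        return (hi << 4) | lo                       # two INT4 values per byte

    def unpack_int4(packed: torch.Tensor) -> torch.Tensor:
        lo = (packed & 0xF).to(torch.int16)
        hi = (packed >> 4).to(torch.int16)
        out = torch.stack((lo, hi), dim=-1).flatten(-2)   # restore original order
        return torch.where(out >= 8, out - 16, out).to(torch.int8)  # sign-extend 4-bit values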
FlashAttention (Dao et al., 2022) has been shown to largely improve the attention computation performance, especially for large batch sizes and sequence lengths. We integrate FlashAttention into our inference pipeline to speed up the attention computation (in FP16). CUDA graphs (NVIDIA, 2021) were introduced by NVIDIA to reduce GPU kernel launching overhead. For small batch sizes and short sequence lengths, the kernel launching overhead is non-negligible, thus we enable CUDA graphs in our inference pipeline to minimize such overhead.
A model deployment scenario can be either latency-sensitive or throughput-oriented, thus different batch sizes and sequence lengths are used for different cases. As shown in Section 3.1, the gain from INT4 is input-dependent (the input decides the GEMM shapes). The memory-bound quantization/dequantization operations introduce input-dependent (i.e., depending on the size of the activations) overhead as well. Due to the various model sizes (particularly the hidden dimension h), input shapes, and hardware, the four linear layers have different trade-offs between the gain and overhead of quantization. For example, for low bs × seq inference with BERT models, quantization of the QKV projection, attention output, and MLP output might not result in E2E performance improvement. If so, we can skip the quantization of these three parts in inference (note that using a higher-bit computation data type for a QAT model does not degrade the inference accuracy).
As such, we develop the four model parts as modular components where quantization can be enabled or disabled separately in the inference pipeline. Different quantization strategies can be applied given a target scenario and hardware. Also, the GEMM schedules used in inference are pre-tuned (with the CUTLASS profiler) for the best performance in the deployment environment.
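As an illustration of this modularity, a per-GEMM switch might look like the sketch below. The names (QuantStrategy, choose_strategy) and the bs×seq threshold are hypothetical, not the released API; they only mirror the idea of enabling or disabling quantization per part based on workload size.

    from dataclasses import dataclass

    @dataclass
    class QuantStrategy:
        # One flag per quantizable GEMM part of the encoder layer.
        qkv_projection: bool = True
        attention_output: bool = True
        mlp_intermediate: bool = True
        mlp_output: bool = True

    def choose_strategy(bs: int, seq: int, small_workload_threshold: int = 4096) -> QuantStrategy:
        """Illustrative heuristic: for small workloads, quantization overhead may outweigh
        the INT4 GEMM gain, so keep only the largest GEMM (MLP intermediate) in INT4."""
        if bs * seq < small_workload_threshold:
            return QuantStrategy(qkv_projection=False, attention_output=False,
                                 mlp_intermediate=True, mlp_output=False)
        return QuantStrategy()  # large workloads: quantize all four parts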
3.3 End-to-end Inference Performance Results
We measure the E2E BERT model INT4 (prefixed with i4-) and INT8 (prefixed with i8-) latency with our inference pipeline and compare it with the HuggingFace FP16 implementation (noted as HF-fp16) as well as the SOTA INT8 implementation (noted as FT-i8) from NVIDIA FasterTransformer (NVIDIA, 2023). The input batch size and sequence length are selected to cover both latency- and throughput-oriented scenarios. We explore different quantization strategies (with a suffix in the name to note what is quantized) with the inference pipeline and show the effectiveness of such tuning. We use symmetric quantization for the BERT models in the experiments, as the earlier section shows no accuracy drop and it is faster than asymmetric quantization because less computation is required for the bias term.
Figure 2a shows the E2E speedup of our INT4 over our INT8 inference when quantizing all four parts. Cross-comparing it with Figure 1, which indicates the upper bound of the E2E INT4-vs-INT8 speedup, we can see that the inference pipeline design achieves the goal of maximizing the performance gain from using INT4 computation. Figure 2b compares our best INT4 inference with the FasterTransformer INT8 (using mode-1, as mode-2 trades off accuracy for better latency) inference. Note that other than the four parts we quantize in our pipeline, FasterTransformer INT8 also quantizes the attention computation, while we use FP16 FlashAttention (see Section 3.2). As annotated, the best quantization strategy for (bs-seq) (1-32), (1-128), and (8-32) is to only quantize the MLP intermediate GEMM (q3). For larger batch sizes and sequence lengths, the best configuration is to quantize all four parts. We show that our highly-optimized INT4 inference improves the SOTA BERT model performance by up to 1.7× as compared to FT-INT8, while the model quality is maintained.
Figure 3 presents the speedup of our inference and FasterTransformer pipelines over HuggingFace FP16 inference, a common baseline for comparison. Our INT4 inference is up to 8.5× faster for latency-oriented scenarios and up to 3× for throughput-oriented scenarios. Note that we focus on maximizing the performance gain from using INT4 computation in this work, thus orthogonal optimizations from FasterTransformer (e.g., padding removal) or other work are applicable to our INT4 inference design and can further improve the inference performance.

4 Failure Cases: Understanding the Quality Degradation of INT4 Decoder Models
For W4A4 GPT models, we have made heavy efforts to tune and distill, but their results are still far away from the FP32 counterparts. In this section, we present several analyses of the causes of such degradation, including:
(1) Layer Normalization (LN). The position of LN is different for encoder and decoder models: LN for BERT and BART happens after each sublayer's residual connection ("Post-LN") (Vaswani et al., 2017), while LN for GPT models operates at the beginning of each sublayer before adding to the residual values ("Pre-LN") (Xiong et al., 2020). Compared to Pre-LN, Post-LN removes the mean and variance shift caused by the residual connection and activation functions, which might make the network more robust. A possible conjecture is that the good quality of INT4 BERT/BART is due to the effect of Post-LN, which makes the models less sensitive to quantization.
(2) Pretraining Effect. The activation range for decoder models can vary significantly for different layers and for different linear modules. A possible conjecture is that pretraining with a dataset of a large scale, such as billions of examples, may exacerbate this issue by introducing more diversity in the input activations, which could lead to less optimal quantization performance.
(3) Attention Mechanism. GPT models use a causal self-attention mechanism to weight the importance of each word in the input and generate tokens in a sequential manner (autoregressive generation), while BART uses an encoder-decoder attention mechanism in addition to causal self-attention. As such, for the first few generated tokens, BART can still gather information from the encoder-decoder attention, which can potentially reduce the quantization error by averaging attention information, while GPT does not have this ability.

Figure 4: The quality gaps between W4A4 and FP32 models, respectively for GPT2-PreLN (blue) and GPT2-PostLN (orange).
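To make the Pre-LN/Post-LN distinction in conjecture (1) concrete, here is a minimal sketch of one transformer sublayer in both styles (illustrative only, not the exact model code):

    import torch.nn as nn

    class PostLNSublayer(nn.Module):      # BERT/BART style
        def __init__(self, d_model, sublayer):
            super().__init__()
            self.sublayer, self.norm = sublayer, nn.LayerNorm(d_model)

        def forward(self, x):
            return self.norm(x + self.sublayer(x))   # LN applied after the residual add

    class PreLNSublayer(nn.Module):       # GPT-2 style
        def __init__(self, d_model, sublayer):
            super().__init__()
            self.sublayer, self.norm = sublayer, nn.LayerNorm(d_model)

        def forward(self, x):
            return x + self.sublayer(self.norm(x))   # LN applied before; residual stays un-normalized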
Layer Normalization. To understand if Pre-LN and Post-LN lead to a significant difference in quantization, we design the following experiments:
(1) As GPT2 by default uses Pre-LN (GPT2-PreLN), we construct a model (GPT2-PostLN) by replacing the Pre-LN with Post-LN. In order to have a fair comparison between the quantization results of GPT2-PreLN and GPT2-PostLN, we directly fine-tune both models on Wikitext-103 from scratch, and the perplexities are 17.88 (PreLN) and 18.95 (PostLN) for GPT2-Medium, and 18.76 (PreLN) and 19.46 (PostLN) for GPT2-base.3
3 Compared to Wikitext-2 and PTB, Wikitext-103 is a considerably larger dataset and thus arrives at a low perplexity even from scratch, closer to the results of the pretrained ones.
(2) We take the above FP32 checkpoints and apply QAT with KD to obtain the best W4A4 models. The perplexities for W4A4 are 18.66 (PreLN) and 19.79 (PostLN) for GPT2-Medium, and 20.46 (PreLN) and 21.73 (PostLN) for GPT2-base. We then calculate the perplexity gaps between the W4A4 and FP32 models.
We report the results in Figure 4: the two perplexity-gap curves between the W4A4 and FP32 models, depicted by the blue curve for GPT2-PreLN and the orange curve for GPT2-PostLN. The overlap of the two curves at the end of training demonstrates that LN may not directly affect the performance degradation for decoder-only models.
Pretraining Effect. Despite obtaining negative results on the position of layer normalization, we have identified an intriguing observation in regard to models trained from scratch. Our experiments reveal that the gap between the student and teacher models in terms of perplexity (PPL) is smaller when training from scratch (20.46 ppl and 18.76 ppl for INT4 and FP32, respectively) as compared to utilizing a pretrained GPT2 model (21.54 ppl for INT4 and 17.46 for FP32). This observation raises questions about the potential negative effect of pretraining in the context of quantization, as the model trained from scratch appears to perform better.

Figure 5: The gaps between the minimum and maximum activations at certain layers (Layer 1, 5, 9, and 12) in the second fully-connected linear module (max-min activation over the hidden dimension of 3072 of FC2). The gaps are plotted with respect to position, with the average taken over 8 batch sizes and a one-standard-deviation shaded region, for the pretrained and scratch-trained models.

To understand this, we compare the position-wise activation range between the fine-tuned models from the pretrained checkpoint and from scratch (referred to as "positional activation"). This provides a token-level understanding of the quantization range. The results are shown in Figure 5, which reveals the higher positional-activation range of the pretrained model as compared to the scratch-trained model. This further supports the hypothesis that pretraining on large diverse datasets may lead to a wider range of activation values, and thus may be suboptimal for quantization as compared to models trained from scratch.
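The positional activation ranges of the kind shown in Figure 5 can be gathered with a simple forward hook; a rough sketch follows (the module path in the comment is only an example of where the second MLP linear typically lives in a Hugging Face GPT-2, not the authors' measurement code):

    import torch

    ranges = []  # one (seq_len,) tensor of max-min gaps per hooked forward call

    def record_positional_range(module, inputs, output):
        x = inputs[0].detach()                               # (batch, seq_len, hidden)
        gap = x.max(dim=-1).values - x.min(dim=-1).values    # per-position max-min range
        ranges.append(gap.mean(dim=0))                       # average over the batch

    # Example usage (hypothetical module path for the second MLP linear, "FC2"):
    # handle = model.transformer.h[0].mlp.c_proj.register_forward_hook(record_positional_range)
    # model(input_ids); handle.remove()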
Attention Mechanism. To gain insight into the impact of different attention mechanisms (encoder-decoder attention and causal self-attention) on quantization errors, we conduct a comparison of the BART-large and GPT2-medium models. We evaluate the "positional perplexity" of both the FP32 and W4A4 models on the CNNDailyMail dataset for BART and the Wikitext-2 dataset for GPT. The results are depicted in Figure 6. We make the following observations:

Figure 6: The positional perplexity across the full sequence for the BART and GPT2 models.

(1) The curves for GPT, whether for the teacher or the student model, tend to exhibit a downward trend. The token losses at early positions are significantly higher than those at later positions. Conversely, the curves for both the teacher and student models of BART exhibit a mild upward trend, with token losses at later positions being no better than those at earlier positions.
(2) The perplexity degradation from quantization for the BART model is small, with a maximum gap of 2.5 ppl at the end of the sequence. In contrast, the GPT model experiences a large accuracy loss from quantization, with a maximum gap of over 100 ppl at the first tens of tokens of the sequence and a gap of around 2 ppl later.
Both phenomena highlight the importance of the additional encoder-decoder attention mechanism. For causal-self-attention-only models (i.e., GPT), the next generated token can only use the information from previous words. As such, (1) the earlier positions have less information to retrieve, which leads to larger ppl scores; and (2) the INT4 model has significant perplexity degradation at the beginning positions compared to the FP32 model due to the information noise from quantization. Thanks to the encoder-decoder attention, the INT4 BART model has (1) relatively stable perplexity at all positions and (2) consistent positional perplexity degradation as compared to the FP32 counterpart.

5 Composability of INT4 Quantization
In this section, we examine the composability of W4A4 to identify techniques that can be used to further accelerate INT4 inference. Specifically, we investigate the potential of combining INT4 quantization with other compression techniques, such as pruning and layer reduction. Our study is based on the observation that encoder-related models, such as BERT and BART, demonstrate robustness to W4A4 compression, as shown in Table 1.

5.1 Composing Semi-structured Pruning with INT4
We focus on combining semi-structured pruning with W4A4. Specifically, we investigate the semi-structured sparsity called Pair-(N:M), which allows for accelerated execution on NVIDIA Ampere GPUs (Mishra et al., 2021; Holmes et al., 2022). The Pair-(N:M) sparsity structure means that there are N zero entries for every M elements. We take BERT-base as an example, as Quantization-Aware Training with Knowledge Distillation for W4A4 models has been shown to lead to better accuracy than its FP32 counterpart. We follow the training recipe described in Wu et al. (2022).
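A Pair-(N:M) mask keeps the M−N largest-magnitude weights in every group of M consecutive elements; a small sketch for the 2:4 case used here (our illustration, not the training code):

    import torch

    def pair_n_m_mask(w: torch.Tensor, n: int = 2, m: int = 4) -> torch.Tensor:
        """Boolean mask that zeroes the n smallest-magnitude entries in every group of m.
        Assumes w.numel() is divisible by m."""
        groups = w.abs().reshape(-1, m)
        keep = groups.topk(m - n, dim=1).indices        # indices of the m-n largest magnitudes
        mask = torch.zeros_like(groups, dtype=torch.bool).scatter_(1, keep, True)
        return mask.reshape(w.shape)

    # Example: apply the mask and then fake-quantize the surviving weights,
    # e.g. with the symmetric quantizer sketched in Section 2.1.
    # w_sparse_quant = sym_quant_dequant(w * pair_n_m_mask(w))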
[Figure 7 panel annotations, best accuracy (MNLI-m/-mm): 3 epochs - Q=>P 84.18/84.54, P=>Q 84.31/84.79; 18 epochs - Q=>P 84.33/84.89, P=>Q 84.56/85.04; 21 epochs - Q=>P 84.6/84.76, P=>Q 84.51/85.04.]

Figure 7: The validation accuracy (Val. Acc.) of the W4A4 + 50% sparsity (i.e., Pair-(2:4)) BERT-base. We compare the order of pruning and quantization. Q=>P (orange solid curve) means the quantization algorithm is in front of the pruning algorithm; P=>Q (blue dashed curve) is the opposite. From the left to the right plot, the difference is the number of training epochs (see titles).

Algorithm Design Order between Pruning and INT4. When combining the two compression techniques, pruning and quantization, a natural question is the ordering of the two in the forward pass: should we put quantization in front of pruning (e.g., Quant(Prune(W)), i.e., P=>Q), or vice versa (e.g., Prune(Quant(W)), i.e., Q=>P)? To understand this, we fine-tune on MNLI with different numbers of training epochs using the simplest ℓ1 pruning method (Han et al., 2015, 2016). The ℓ1 pruning method prunes the weights with small absolute values to zero while keeping the weights with large absolute values untouched. The ℓ1 pruning mask is determined by the absolute values of the weight matrix of the teacher model and remains fixed throughout the training.
We plot the accuracy on the validation dataset in Figure 7. As can be seen, for shorter training times, P=>Q is better than Q=>P. However, the benefit of P=>Q starts to diminish as we increase the number of training epochs. Overall, it is generally recommended to perform pruning before quantization, because pruning removes unnecessary weights from the model. As such, it can help mitigate the loss of precision caused by quantization and make the quantization process more effective.
With the decision to use the pruning-quantization order, we trained an INT4 BERT-base model with both 50% and 75% sparsity and report the best validation results in Table 2. We found that a 75% sparsity level results in an accuracy drop of 0.79/1.6 for the MNLI-m/mm tasks. Therefore, if maintaining high accuracy is a priority, using a 50% sparsity level for W4A4 models is recommended. In the appendix, we also present the results of applying 50% sparsity to W4A4 models for 8 GLUE tasks and confirm that the average GLUE scores are similar to those of the original FP32 models.

Table 2: Quantization (Q) and pruning (P) for 50% or 75% sparsity.
Task      | Teacher (FP32) | Epoch-3 (Q only) | Epoch-21 P+Q, 50% sparsity | Epoch-21 P+Q, 75% sparsity
MNLI-m/mm | 84.9/85.6      | 84.8/85.2        | 84.56/85.04                | 84.11/83.99

5.2 Composing Layer-reduction with Quantization
Reducing the depth of a model, also known as layer reduction, is a more straightforward method to improve inference latency, as it requires no modifications to the single-layer implementation (e.g., GPU kernels). However, it should be noted that these layer-reduced models may not be able to capture the same level of complexity or learn the same representations as the original models. To understand the compatibility of layer reduction and quantization, as well as the trade-off between model depth and quality, we perform a detailed study on an encoder-decoder model.
Our implementation of layer-reduction strategies and fine-tuning recipes follows the work in Li et al. (2022)4.
However, there are two key differences: (1) Our quantization algorithm, described in Section 2.1, differs from the one used in Li et al. (2022).5 (2) While Li et al. (2022) uses 8-bit activations, we trained our model with 4-bit activations.
More Encoder or More Decoder? When applying layer reduction to an encoder-decoder model, we need to decide the number of encoder and decoder layers. For example, when the depth is fixed at four layers, should we have more encoder layers (3-encoder and 1-decoder), more decoder layers (1-encoder and 3-decoder), or an equal number of layers for both (2-encoder and 2-decoder)? We investigate different scenarios of x-encoder and y-decoder layers, where x + y ∈ {9, 7, 4}, with x ∈ {6, 5, 4, 3} for the case of x + y = 9 and x ∈ {3, 2, 1} for the case of x + y = 4. We train our models for 10 epochs with a fixed random seed and a learning rate of 5e-5.
The results are reported in Table 3. A comparison of the results within the same depth (i.e., 9, 7, and 4) reveals that it is beneficial to have more encoder layers than decoder layers, and that the number of decoder layers should be greater than one. In particular, our experiments demonstrate that the performance of a 9-layer W4A4 BART model (with 6 encoder layers and 3 decoder layers) can be maintained at an acceptable level, which is only 0.6 lower than the 12-layer INT4 model on the CNNDailyMail dataset. This represents a potential latency improvement of about 50% for the decoder part while experiencing a minor accuracy drop.

Table 3: The W4A4 model with layer reduction. For reference, the original W4A4 BART-base (6-encoder and 6-decoder) scores on CNNDailyMail and XSUM are 44.83/21.67/42.08 and 41.53/18.56/33.62, respectively.

Encoder (Decoder) | Six (Three)       | Five (Four)       | Four (Five)       | Three (Six)       | Four (Four)
CNNDailyMail      | 44.23/21.07/41.58 | 44.15/21.02/41.45 | 43.96/20.9/41.26  | 43.59/20.44/40.82 | 43.77/20.61/41.09
XSUM              | 40.61/17.83/32.90 | 40.30/17.53/32.61 | 40.18/17.47/32.43 | 39.30/16.76/31.64 | 39.75/17.10/32.06

Encoder (Decoder) | Six (One)         | Five (Two)        | Four (Three)      | Three (Four)      | Three (Three)
CNNDailyMail      | 42.48/19.83/40.13 | 43.55/20.56/40.99 | 43.48/20.38/40.85 | 43.29/20.14/40.54 | 42.90/19.85/40.21
XSUM              | 38.23/16.45/31.49 | 39.52/17.03/31.98 | 39.43/16.89/31.80 | 38.86/16.46/31.31 | 38.22/15.91/30.63

Encoder (Decoder) | Three (One)       | Two (Two)         | One (Three)
CNNDailyMail      | 41.26/18.58/38.93 | 41.83/18.82/39.20 | 41.40/18.39/38.68
XSUM              | 35.88/14.71/29.34 | 36.09/14.39/29.01 | 34.69/13.22/27.64

Summary. We demonstrate that it is possible to combine INT4 quantization with other compression techniques, such as composing INT4 with 50% Ampere-structured sparsity at around 0.5 GLUE points of degradation, and composing INT4 with a 25% layer reduction without causing much degradation on summarization tasks. Fully investigating the composability of quantization and other methods is beyond the scope of our paper, and we leave it as future work.

6 Conclusions
Improving the inference efficiency of language models has been increasingly critical given their growing adoption but high compute resource requirements. While quantization techniques enabling INT8 computation on these language models have been well explored recently, how to fully unlock the INT4 computation power of GPUs is an emerging and unanswered question. In this work, we thoroughly investigate the feasibility of applying INT4 quantization to language models, and our INT4 encoder inference pipeline shows an up to 1.7× latency improvement over SOTA INT8 inference.
Summary. We demonstrate that it is possible to combine INT4 quantization with other compression techniques: composing INT4 with 50% Ampere-structured sparsity costs around 0.5 GLUE points, and composing INT4 with 25% layer reduction causes little degradation on summarization tasks. Fully investigating the composability of quantization and other methods is beyond the scope of this paper and we leave it as future work.

6 Conclusions
Improving the inference efficiency of language models has become increasingly critical given their growing adoption but high compute resource requirements. While quantization techniques enabling INT8 computation on these language models have been well explored recently, how to fully unlock the INT4 computation power of GPUs is an emerging and unanswered question. In this work, we thoroughly investigate the feasibility of applying INT4 quantization to language models, and our INT4 encoder inference pipeline shows an up to 1.7× latency improvement over SOTA INT8 inference. We also provide an in-depth analysis of the accuracy drop for decoder models when using INT4 quantization, and study the composability of INT4 quantization with other compression techniques for encoder-related models.

4 https://github.com/amazon-science/dq-bart
5 We did a comparison of the two quantization algorithms and found that the algorithm for INT4 presented in Section 2.1 has better accuracy than that in Li et al. (2022).

References
Abdolrashidi, A., Wang, L., Agrawal, S., Malmaud, J., Rybakov, O., Leichner, C., and Lew, L. (2021). Pareto-optimal quantized resnet is mostly 4-bit. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3091–3099.
Aminabadi, R. Y., Rajbhandari, S., Zhang, M., Awan, A. A., Li, C., Li, D., Zheng, E., Rasley, J., Smith, S., Ruwase, O., et al. (2022). Deepspeed inference: Enabling efficient inference of transformer models at unprecedented scale. arXiv preprint arXiv:2207.00032.
Bai, H., Zhang, W., Hou, L., Shang, L., Jin, J., Jiang, X., Liu, Q., Lyu, M., and King, I. (2020). Binarybert: Pushing the limit of bert quantization. arXiv preprint arXiv:2012.15701.
Bengio, Y., Léonard, N., and Courville, A. (2013). Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432.
Cer, D., Diab, M., Agirre, E., Lopez-Gazpio, I., and Specia, L. (2017). Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation. arXiv preprint arXiv:1708.00055.
Chung, I., Kim, B., Choi, Y., Kwon, S. J., Jeon, Y., Park, B., Kim, S., and Lee, D. (2020). Extremely low bit transformer quantization for on-device neural machine translation. arXiv preprint arXiv:2009.07453.
Dagan, I., Roth, D., Sammons, M., and Zanzotto, F. M. (2013). Recognizing textual entailment: Models and applications. Synthesis Lectures on Human Language Technologies, 6(4):1–220.
Dao, T., Fu, D. Y., Ermon, S., Rudra, A., and Ré, C. (2022). Flashattention: Fast and memory-efficient exact attention with io-awareness. arXiv preprint arXiv:2205.14135.
Dehghani, M., Gouws, S., Vinyals, O., Uszkoreit, J., and Kaiser, Ł. (2018). Universal transformers. arXiv preprint arXiv:1807.03819.
Dettmers, T., Lewis, M., Belkada, Y., and Zettlemoyer, L. (2022a). GPT3.int8(): 8-bit matrix multiplication for transformers at scale. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.
Dettmers, T., Lewis, M., Belkada, Y., and Zettlemoyer, L. (2022b). Llm.int8(): 8-bit matrix multiplication for transformers at scale. arXiv preprint arXiv:2208.07339.
Dettmers, T. and Zettlemoyer, L. (2022). The case for 4-bit precision: k-bit inference scaling laws. arXiv preprint arXiv:2212.09720.
Dolan, W. B. and Brockett, C. (2005). Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005).
Dong, Z., Yao, Z., Gholami, A., Mahoney, M. W., and Keutzer, K. (2019). HAWQ: Hessian aware quantization of neural networks with mixed-precision. In Proceedings of the IEEE International Conference on Computer Vision, pages 293–302.
Fan, A., Grave, E., and Joulin, A. (2019). Reducing transformer depth on demand with structured dropout. arXiv preprint arXiv:1909.11556.
Gholami, A., Kim, S., Dong, Z., Yao, Z., Mahoney, M. W., and Keutzer, K. (2021). A survey of quantization methods for efficient neural network inference. arXiv preprint arXiv:2103.13630.
Gordon, M. A., Duh, K., and Andrews, N. (2020). Compressing bert: Studying the effects of weight pruning on transfer learning. arXiv preprint arXiv:2002.08307.
Han, S., Mao, H., and Dally, W. J. (2016). Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. International Conference on Learning Representations.
Han, S., Pool, J., Tran, J., and Dally, W. (2015). Learning both weights and connections for efficient neural network. In Advances in neural information processing systems, pages 1135–1143.
Han, T., Zhang, T., Li, D., Liu, G., Tian, L., Xie, D., and Shan, Y. S. (2020). Convolutional neural network with int4 optimization on xilinx devices. Xilinx White Paper, WP521.
Hermann, K. M., Kocisky, T., Grefenstette, E., Espeholt, L., Kay, W., Suleyman, M., and Blunsom, P. (2015). Teaching machines to read and comprehend. arXiv preprint arXiv:1506.03340.
Hinton, G., Vinyals, O., and Dean, J. (2014). Distilling the knowledge in a neural network. Workshop paper in NIPS.
Holmes, C., Zhang, M., He, Y., and Wu, B. (2022). Compressing pre-trained transformers via low-bit nxm sparsity for natural language understanding. arXiv preprint arXiv:2206.15014.
Hu, E. J., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., Chen, W., et al. (2021). Lora: Low-rank adaptation of large language models. In International Conference on Learning Representations.
Iyer, S., Dandekar, N., and Csernai, K. (2017). First quora dataset release: Question pairs. URL https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs.
Jiao, X., Yin, Y., Shang, L., Jiang, X., Chen, X., Li, L., Wang, F., and Liu, Q. (2019). Tinybert: Distilling bert for natural language understanding. arXiv preprint arXiv:1909.10351.
Kim, S., Gholami, A., Yao, Z., Mahoney, M. W., and Keutzer, K. (2021). I-bert: Integer-only bert quantization. In International conference on machine learning, pages 5506–5518. PMLR.
Lagunas, F., Charlaix, E., Sanh, V., and Rush, A. M. (2021). Block pruning for faster transformers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 10619–10629.
Lambda (2023). GPU workstation for deep learning. https://lambdalabs.com/gpu-workstations/vector.
Lan, Z., Chen, M., Goodman, S., Gimpel, K., Sharma, P., and Soricut, R. (2019). Albert: A lite bert for self-supervised learning of language representations. arXiv preprint arXiv:1909.11942.
LeCun, Y., Denker, J. S., and Solla, S. A. (1990). Optimal brain damage. In Advances in neural information processing systems, pages 598–605.
Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., Stoyanov, V., and Zettlemoyer, L. (2020). Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871–7880.
Li, F., Zhang, B., and Liu, B. (2016a). Ternary weight networks. arXiv preprint arXiv:1605.04711.
Li, H., Kadav, A., Durdanovic, I., Samet, H., and Graf, H. P. (2016b). Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710.
Li, Z., Wang, Z., Tan, M., Nallapati, R., Bhatia, P., Arnold, A., Xiang, B., and Roth, D. (2022). Dq-bart: Efficient sequence-to-sequence model via joint distillation and quantization. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 203–211.
Liu, Z., Wang, Y., Han, K., Zhang, W., Ma, S., and Gao, W. (2021). Post-training quantization for vision transformer. Advances in Neural Information Processing Systems, 34.
Mao, H., Han, S., Pool, J., Li, W., Liu, X., Wang, Y., and Dally, W. J. (2017). Exploring the regularity of sparse structure in convolutional neural networks. Workshop paper in CVPR.
Mao, Y., Wang, Y., Wu, C., Zhang, C., Wang, Y., Yang, Y., Zhang, Q., Tong, Y., and Bai, J. (2020). Ladabert: Lightweight adaptation of bert through hybrid model compression. arXiv preprint arXiv:2004.04124.
Marcinkiewicz, M. A. (1994). Building a large annotated corpus of english: The penn treebank. Using Large Corpora, page 273.
Merity, S., Xiong, C., Bradbury, J., and Socher, R. (2017). Pointer sentinel mixture models. In International Conference on Learning Representations.
Michel, P., Levy, O., and Neubig, G. (2019). Are sixteen heads really better than one? arXiv preprint arXiv:1905.10650.
Micikevicius, P., Narang, S., Alben, J., Diamos, G., Elsen, E., Garcia, D., Ginsburg, B., Houston, M., Kuchaiev, O., Venkatesh, G., et al. (2018). Mixed precision training. In International Conference on Learning Representations.
Mishra, A., Latorre, J. A., Pool, J., Stosic, D., Stosic, D., Venkatesh, G., Yu, C., and Micikevicius, P. (2021). Accelerating sparse deep neural networks. arXiv preprint arXiv:2104.08378.
Narayan, S., Martins, A., Sordoni, A., Bachman, P., Courville, A., and Bengio, Y. (2018). Don't give me the details, just the summary!: topic-aware convolutional neural networks for extreme summarization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3706–3716.
NVIDIA (2017). CUTLASS: Fast Linear Algebra in CUDA C++. https://developer.nvidia.com/blog/cutlass-linear-algebra-cuda/.
NVIDIA (2021). Employing CUDA Graphs in a Dynamic Environment. https://developer.nvidia.com/blog/employing-cuda-graphs-in-a-dynamic-environment/.
NVIDIA (2023). FasterTransformer. https://github.com/NVIDIA/FasterTransformer.
Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., and Sutskever, I. (2019). Language models are unsupervised multitask learners.
Raganato, A., Scherrer, Y., and Tiedemann, J. (2020). Fixed encoder self-attention patterns in transformer-based machine translation. arXiv preprint arXiv:2002.10260.
Rajpurkar, P., Zhang, J., Lopyrev, K., and Liang, P. (2016). SQuAD: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.
Sanh, V., Debut, L., Chaumond, J., and Wolf, T. (2019). Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108.
Sanh, V., Wolf, T., and Rush, A. (2020). Movement pruning: Adaptive sparsity by fine-tuning. Advances in Neural Information Processing Systems, 33:20378–20389.
Shen, S., Dong, Z., Ye, J., Ma, L., Yao, Z., Gholami, A., Mahoney, M. W., and Keutzer, K. (2020). Q-BERT: Hessian based ultra low precision quantization of bert. In AAAI, pages 8815–8821.
Socher, R., Perelygin, A., Wu, J., Chuang, J., Manning, C. D., Ng, A. Y., and Potts, C. (2013). Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pages 1631–1642.
Sun, S., Cheng, Y., Gan, Z., and Liu, J. (2019). Patient knowledge distillation for bert model compression. arXiv preprint arXiv:1908.09355.
Sun, X., Wang, N., Chen, C.-Y., Ni, J., Agrawal, A., Cui, X., Venkataramani, S., El Maghraoui, K., Srinivasan, V. V., and Gopalakrishnan, K. (2020a). Ultra-low precision 4-bit training of deep neural networks. Advances in Neural Information Processing Systems, 33:1796–1807.
Sun, Z., Yu, H., Song, X., Liu, R., Yang, Y., and Zhou, D. (2020b). Mobilebert: a compact task-agnostic bert for resource-limited devices. arXiv preprint arXiv:2004.02984.
Tang, H., Zhang, X., Liu, K., Zhu, J., and Kang, Z. (2022). Mkq-bert: Quantized bert with 4-bits weights and activations. arXiv preprint arXiv:2203.13483.
Tenney, I., Das, D., and Pavlick, E. (2019). Bert rediscovers the classical nlp pipeline. arXiv preprint arXiv:1905.05950.
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, Ł., and Polosukhin, I. (2017). Attention is all you need. In Advances in neural information processing systems, pages 5998–6008.
Wang, W., Wei, F., Dong, L., Bao, H., Yang, N., and Zhou, M. (2020). Minilm: Deep self-attention distillation for task-agnostic compression of pre-trained transformers. arXiv preprint arXiv:2002.10957.
Warstadt, A., Singh, A., and Bowman, S. R. (2018). Neural network acceptability judgments. arXiv preprint arXiv:1805.12471.
Williams, A., Nangia, N., and Bowman, S. R. (2017). A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426.
Wu, X., Yao, Z., Zhang, M., Li, C., and He, Y. (2022). Extreme compression for pre-trained transformers made simple and efficient. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.
Xiao, G., Lin, J., Seznec, M., Demouth, J., and Han, S. (2022). Smoothquant: Accurate and efficient post-training quantization for large language models. arXiv preprint arXiv:2211.10438.
Xiong, R., Yang, Y., He, D., Zheng, K., Zheng, S., Xing, C., Zhang, H., Lan, Y., Wang, L., and Liu, T. (2020). On layer normalization in the transformer architecture. In International Conference on Machine Learning, pages 10524–10533. PMLR.
Yao, Z., Aminabadi, R. Y., Zhang, M., Wu, X., Li, C., and He, Y. (2022). Zeroquant: Efficient and affordable post-training quantization for large-scale transformers. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.
Yao, Z., Wu, X., Ma, L., Shen, S., Keutzer, K., Mahoney, M. W., and He, Y. (2021). LEAP: Learnable Pruning for Transformer-based Models. arXiv e-prints, page arXiv:2105.14636.

A Related Work
Model compression, as a technique to reduce model size and computation cost, can be achieved by pruning, quantization, low-rank factorization and efficient architecture designs (Han et al., 2015; Li et al., 2016b; Mao et al., 2017; LeCun et al., 1990; Michel et al., 2019; Fan et al., 2019; Gordon et al., 2020; Raganato et al., 2020; Dong et al., 2019; Yao et al., 2021; Mao et al., 2020; Hinton et al., 2014; Sanh et al., 2019; Sun et al., 2019; Jiao et al., 2019; Sun et al., 2020b; Wang et al., 2020; Lan et al., 2019; Dehghani et al., 2018; Liu et al., 2021; Hu et al., 2021; Micikevicius et al., 2018). Among the large body of literature, we mainly cover the recent related works on INT4 quantization and system inference.
As described in the introduction, 8-bit quantization for LLMs, possibly mixed with other precisions, has been widely studied and proven to be effective in recent years (Yao et al., 2022; Xiao et al., 2022; Dettmers et al., 2022a,b; Li et al., 2022; Kim et al., 2021). However, purely INT4 quantization, as a very aggressive technique that can have a significant impact on the accuracy of the model, is not widely used in practice and is still emerging. Below we describe some more closely related works besides those mentioned in the introduction. In Sun et al. (2020a), a 4-bit floating-point format with an adaptive gradient scaling technique is proposed and demonstrates its effectiveness on computer vision, speech and NLP tasks, together with solid hardware acceleration. Our study focuses on the use of INT4 quantization instead of FP4, and the acceleration hardware is based on the Ampere architecture. In Chung et al. (2020), a low-bit mixed-precision quantization strategy is proposed to represent Transformer models. However, their activations are kept in full precision. In Han et al. (2020), a detailed implementation of INT4 optimization is presented, but it is only applicable to convolutional networks and not transformer models.

B Quantization Algorithms
Weight Quantization. To quantize a weight matrix W ∈ R^(din×dout) in a model, we apply the group-wise quantization method proposed in Shen et al. (2020); Yao et al. (2022). That is, we vectorize the matrix, Vectorize(W) ∈ R^d (d = din·dout), partition the weights into g groups, and quantize each group separately. The finer the quantization we use (larger g), the smaller the approximation error between the weight matrix and its 4-bit counterpart. The largest group number we apply here is din, i.e., a row-wise weight quantization for best GPU utilization.
Activation Quantization. Different from the static weight parameters during inference, the activations are dynamic. In order to achieve the best latency reduction (Gholami et al., 2021), the static quantization method calibrates S using training data and fixes S during inference. However, this also limits the quantization representation for activations, as discussed in Yao et al. (2022). Thus, we adopt a finer-grained token-wise dynamic quantization and use the min/max range of each token.

C Additional Experimental Details and Results
C.1 Experimental Details for Section 2.2
All experiments are performed on V100 GPUs and the training strategy is Quantization-aware Training (QAT) with Knowledge Distillation (KD). For BERT models, the maximum sequence length for MNLI/QQP is set to 128 with a batch size of 64 for base and 32 for large. Each independent experiment is run on a single GPU with a fixed random seed of 42. We record the validation performance every 1000 iterations during training and report the best validation value. For BART-type models, we follow Li et al. (2022) closely with slightly different hyper-parameters, as shown in Table C.1. Each independent experiment is run on 2 GPUs for base models and 4 GPUs for the large model. As for GPT2-type models, we apply the pretrained Hugging Face models with maximum length 1024 and a batch size of 8 with 4 GPUs for an independent run. See Table C.1 for the hyper-parameter search; we will open-source the code and the configurations.
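The QAT recipe above simulates low-precision inference with the group-wise (row-wise) weight and token-wise dynamic activation quantizers of Appendix B. A minimal simulated-quantization ("fake quant") sketch is given below; the function names, tensor shapes, and the unsigned [0, 2^b − 1] activation range are illustrative assumptions, not the paper's CUDA kernels.

```python
import torch

def quantize_weight_rowwise(w: torch.Tensor, bits: int = 4) -> torch.Tensor:
    """Symmetric group-wise weight quantization with one scale per row of the
    weight matrix (the row-wise grouping described in Appendix B)."""
    w2d = w.reshape(w.shape[0], -1)
    scale = w2d.abs().amax(dim=1, keepdim=True) / (2 ** (bits - 1) - 1)
    q = torch.clamp(torch.round(w2d / scale), -2 ** (bits - 1), 2 ** (bits - 1) - 1)
    return (q * scale).reshape_as(w)  # dequantized ("fake quant") weights for QAT

def quantize_activation_tokenwise(x: torch.Tensor, bits: int = 4) -> torch.Tensor:
    """Dynamic token-wise activation quantization using each token's min/max range."""
    x_min = x.amin(dim=-1, keepdim=True)
    scale = (x.amax(dim=-1, keepdim=True) - x_min).clamp(min=1e-8) / (2 ** bits - 1)
    q = torch.clamp(torch.round((x - x_min) / scale), 0, 2 ** bits - 1)
    return q * scale + x_min
```

During QAT, functions of this kind would wrap every linear layer's weight and input, with a straight-through estimator handling the rounding in the backward pass.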
Asymmetric and Symmetric Quantization. To better understand the difference between asymmetric and symmetric quantization, we plot in Figure C.1 the performance on the validation datasets over the course of quantization-aware training. The orange curves always sit on top of the blue dashed lines, showing that asymmetric quantization is better than symmetric quantization. Furthermore, Figure C.1 shows that (1) the gap between symmetric and asymmetric quantization becomes more obvious as the model size increases from base (the first row) to large/medium (the second row), which indicates the importance of asymmetric quantization for larger models; and (2) while the benefit of the asymmetric method (over the symmetric one) can become marginal from the beginning of training to the end, this appears to be the case only for BERT and BART, not for GPT.

Table C.1: The hyper-parameters we tuned for the results in Table 1. An entry with a single choice means we only use the default value. For entries with multiple choices, we bold the one that gives the best performance. In the table, Att with ⋆ (Att⋆) denotes attention scores that are not normalized, and Att is the normalized version (note that the default output from the Huggingface library is a normalized version of the attention scores).
Models        | BERT Base                | BERT Large               | BART Base              | BART Large             | GPT Base                           | GPT Medium
Dropout       | 0.1 (default)            | 0.1 (default)            | 0.1 (default)          | 0.1 (default)          | {0, 0.05, 0.1}                     | {0, 0.05, 0.1}
Clip Values   | {[-5.0, 5.0], [-∞, +∞]}  | {[-5.0, 5.0], [-∞, +∞]}  | {[-1,1],[-2.5,2.5]}    | [-2.5, 2.5]            | {[-0.5,0.5],[-1, 1], [-2.5, 2.5]}  | {[-0.5,0.5],[-1, 1], [-2.5, 2.5]}
Loss Terms    | Logit/Att⋆/Rep (default) | Logit/Att⋆/Rep (default) | ClsLoss/Logit/Att⋆/Rep | ClsLoss/Logit/Att⋆/Rep | {None,ClsLoss}+{Att,Att⋆}+Logit/Rep | {None,ClsLoss}+{Att,Att⋆}+Logit/Rep
Epoch         | {3, 9}                   | {3, 5}                   | 20                     | 8                      | {30, 60, 90}                       | {15, 30, 45}
Batch size    | 64                       | 32                       | 16                     | 8                      | 8                                  | 8
Learning Rate | {5e-5, 1e-4}             | {2e-5, 5e-5}             | {2e-5, 5e-5}           | {2e-5, 5e-5}           | {5e-5, 1e-4, 5e-4}                 | {5e-5, 1e-4}

[Figure C.1 plots: validation metric vs. training iterations for symmetric vs. asymmetric W4A4. BERT-base accuracy (average of MNLI-m and MNLI-mm), best acc.: symmetric 84.31/84.48, asymmetric 84.29/84.65; BERT-large, best acc.: symmetric 86.25/86.2, asymmetric 86.49/86.28. BART-base Rouge Lsum on XSUM, best: symmetric 33.69, asymmetric 33.62; BART-large, best: symmetric 36.33, asymmetric 36.79. GPT2-base perplexity on Wikitext-2, best: symmetric 27.28, asymmetric 25.99; GPT2-medium, best: symmetric 19.51, asymmetric 18.74.]
Figure C.1: The performance of W4A4 during quantization-aware training with KD over the validation dataset for BERT (left), BART (middle), and GPT (right) models, respectively with metrics: Accuracy (Acc., higher is better), Rouge Lsum (RLsum, higher is better), and perplexity (PPL, lower is better). Model sizes in the top row are smaller than those in the bottom row.

C.2 MKQ-BERT Results
Table C.2 shows the latency for a single BERT-base layer reported by MKQ-BERT (Tang et al., 2022, Table 2), compared to FasterTransformer (NVIDIA, 2023). We can see that both the FP32 and INT8 results are off by more than an order of magnitude.
Due to the lack of implementation details in the MKQ-BERT paper (there is no open-sourced code), we cannot further identify the issue.

Table C.2: End-to-end inference time (ms) for running one layer of the BERT-base model with different batch sizes and sequence lengths on NVIDIA T4 GPUs. Columns 2 to 4 are numbers taken from Tang et al. (2022). FasterTransformer (FT) requires the sequence length to be a multiple of 32, so the inputs in parentheses are used to run FasterTransformer.
Batch Size-Seq. Length | MKQ-fp32 | MKQ-int8 | MKQ-int4 | FT-fp32 | FT-int8
16-440 (16-448)        | 1.38     | 0.2131   | 0.1605   | 25.62   | 5.1
16-537 (16-544)        | 1.845    | 0.2457   | 0.1793   | 34.25   | 6.61
16-681 (16-704)        | 2.69     | 0.2609   | 0.1965   | 46.54   | 9.39

C.3 Sensitivity of Activation Quantization for GPT2
In this section, we study how sensitive the model quality is to activation quantization. We relax the INT4 activations to INT8 or back to FP32, and follow the same QAT recipe as W4A4. We plot the average perplexity with respect to the training iteration in Figure 5 (left) as well as the position-wise perplexity at training iteration 34000 in Figure 5 (right). We see that even with QAT and KD, W4A8 (green) is better than W4A4 but still far from the teacher's quality. Only 4-bit weight-only quantization (w4 only, red curve) can almost match the teacher's quality (blue), which indicates that autoregressive generation using GPT models is highly sensitive to activation quantization. It is interesting to notice that the red curve in Figure 5 (left) already flattens at the beginning of training, which means a PTQ method could be feasible for weight-only quantization; this aligns with the observation in Dettmers and Zettlemoyer (2022).

C.4 More Experiments on Composing Pruning and INT4
Besides the MNLI/QQP tasks mentioned in Section 5.1, we include the following GLUE tasks for W4A4 quantization: MRPC (Dolan and Brockett, 2005), STS-B (Cer et al., 2017), SST-2 (Socher et al., 2013), QNLI (Rajpurkar et al., 2016), QQP (Iyer et al., 2017), MNLI (Williams et al., 2017), CoLA (Warstadt et al., 2018), and RTE (Dagan et al., 2013). The maximum sequence length is set to 64 for CoLA/SST-2, and 128 for the remaining sentence-pair tasks.

Table C.3: Comparison between static and iterative pruning methods on top of W4A4 models. Here the 50% sparsity is semi-structured pruning with Pair-(2:4). We applied data augmentation for the smaller datasets and used the long training epochs (Budget-C) shown in Table C.4. The learning rate is fixed at 1e-4. Note that the results for MNLI and QQP differ from Table 2 due to different teacher models.
Model               | Pruning Method       | CoLA (Mcc) | MNLI-m/-mm (Acc/Acc) | MRPC (F1/Acc) | QNLI (Acc) | QQP (F1/Acc) | RTE (Acc) | SST-2 (Acc) | STS-B (Pear/Spea) | Avg. all
BERTbase (teacher)  |                      | 59.7       | 84.9/85.6            | 90.6/86.3     | 92.1       | 88.6/91.5    | 72.2      | 93.2        | 90.1/89.6         | 83.95
w4a4 + 50% sparsity | Static (weight)      | 57.4       | 84.4/84.8            | 90.8/86.5     | 91.4       | 88.3/91.4    | 73.3      | 93.3        | 89.5/89.2         | 83.56
w4a4 + 50% sparsity | Iterative (gradient) | 58.1       | 84.3/84.9            | 90.9/86.8     | 91.4       | 88.4/91.4    | 72.9      | 93.3        | 89.4/89.0         | 83.61

Table C.4: Training budgets for the GLUE tasks.
Dataset         | Data Aug. | Budget-A | Budget-B | Budget-C
QQP/MNLI        | no        | 3        | 9        | 18 or 36
QNLI            | yes       | 1        | 3        | 6 or 9
SST-2/STS-B/RTE | yes       | 1        | 3        | 12
CoLA/MRPC       | yes       | 1        | 3        | 12 or 18

Table C.5: Results with data augmentation. W4A4 with Budget-A.
Cost             | Learning rate | CoLA (Mcc) | MNLI-m/-mm (Acc/Acc) | MRPC (F1/Acc) | QNLI (Acc) | QQP (F1/Acc) | RTE (Acc) | SST-2 (Acc) | STS-B (Pear/Spea) | Avg. all | Avg. w/o CoLA
BERTlarge (fp32) |               | 63.4       | 85.4/85.4            | 91.6/88.0     | 92.2       | 88.4/91.05   | 74.00     | 93.6        | 90.3/90.1         | 84.81    | 87.50
Budget-A         | 1e-05         | 62.1       | 85.6/85.3            | 91.2/87.3     | 92.5       | 88.3/91.3    | 69.7      | 93.7        | 90.3/90.1         | 84.20    | 86.96
Budget-A         | 5e-05         | 64.6       | 85.6/85.2            | 91.0/87.0     | 92.5       | 87.7/90.9    | 72.2      | 93.8        | 90.7/90.4         | 84.72    | 87.24
Budget-A         | 0.0001        | 61.7       | 85.8/85.4            | 90.9/87.0     | 91.9       | 88.5/91.5    | 75.5      | 93.8        | 90.6/90.3         | 84.80    | 87.69
Best (above)     |               | 64.6       | 85.8/85.4            | 91.2/87.3     | 92.5       | 88.5/91.5    | 75.5      | 93.8        | 90.7/90.4         | 85.23    | 87.81
BERTbase (fp32)  |               | 59.7       | 84.9/85.6            | 90.6/86.3     | 92.1       | 88.6/91.5    | 72.2      | 93.2        | 90.1/89.6         | 83.95    | 86.98
Budget-A         | 2e-05         | 59.1       | 84.5/85.0            | 91.2/87.0     | 91.7       | 88.3/91.3    | 73.6      | 93.7        | 89.8/89.4         | 83.97    | 87.08
Budget-A         | 5e-05         | 60.5       | 84.6/85.0            | 90.4/85.8     | 91.6       | 88.4/91.4    | 71.5      | 93.3        | 90.0/89.6         | 83.74    | 86.65
Budget-A         | 0.0001        | 59.9       | 84.8/85.2            | 90.4/85.8     | 92.0       | 88.3/91.4    | 72.6      | 93.5        | 89.9/89.6         | 83.90    | 86.90
Best (above)     |               | 60.5       | 84.8/85.2            | 91.2/87.0     | 92.0       | 88.4/91.4    | 73.6      | 93.7        | 90.0/89.6         | 84.24    | 87.21

[Figure C.2 plots. Top: GPT2 generation on Wikitext-2, with average perplexity vs. training iterations for Teacher (fp32), w4a4, w4a8, and w4-only, and position-wise perplexity at iteration 34000 (with a zoomed-out y-axis inset). Bottom: validation accuracy (MNLI-m) vs. training iterations for the movement vs. static l1 pruning comparison; best acc. (MNLI-m/-mm) with 3 epochs: Dynamic Mov. 83.83/84.07 vs. Static l1 84.31/84.79; with 18 epochs: Dynamic Mov. 84.45/84.55 vs. Static l1 84.56/85.04.]
Figure C.2: The top two figures are for GPT2 quantization. The bottom two figures are for the comparison between movement pruning and l1 pruning with QAT.

Static pruning or iterative movement pruning. Now that we have decided to apply the prune-then-quantize (P=>Q) algorithm, one may wonder whether the l1 pruning method used above is the best pruning algorithm. Recent advances in pruning suggest that Movement Pruning with an iterative pruning threshold (Sanh et al., 2020; Lagunas et al., 2021) is effective in transfer learning for language models. That is, during iterative pruning, the mask is updated and determined by the gradients of the weights instead of their values. Previous works study pruning alone; here we investigate whether it also works well with quantized models and layer-wise KD.

The results are shown in Figure C.2 and Table C.3. We see that l1 is consistently better under both long and short training epochs, although the gap between the two methods can be reduced with sufficient iterations. As iterative pruning based on the gradients of the weight matrices requires the masks to be updated dynamically, its computation complexity/time is much higher than that of static masks under the same number of training iterations. Thus, this finding indicates that static pruning is sufficient when applying KD for QAT.
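To make the prune-then-quantize (P=>Q) composition of Section 5.1 and the static magnitude (l1) masking discussed above concrete, a minimal sketch is given below. The 2:4 mask construction and the symmetric fake-quantizer are simplified illustrations with assumed shapes and names, not the training code behind the reported numbers.

```python
import torch

def magnitude_mask_2to4(w: torch.Tensor) -> torch.Tensor:
    """Static l1 (magnitude) mask with Ampere-style 2:4 semi-structured sparsity:
    keep the two largest-magnitude weights in every group of four
    (assumes w.numel() is divisible by 4)."""
    groups = w.reshape(-1, 4)
    keep = groups.abs().topk(k=2, dim=1).indices          # indices of the 2 largest |w|
    mask = torch.zeros_like(groups).scatter_(1, keep, 1.0)
    return mask.reshape_as(w)

def fake_quant_sym(w: torch.Tensor, bits: int = 4) -> torch.Tensor:
    """Symmetric simulated (fake) quantization as used during QAT."""
    scale = w.abs().max() / (2 ** (bits - 1) - 1)
    q = torch.clamp(torch.round(w / scale), -2 ** (bits - 1), 2 ** (bits - 1) - 1)
    return q * scale

# P=>Q: the mask is computed once from the teacher weights and kept fixed;
# in every forward pass the masked weight is then quantized, i.e. Quant(Prune(W)).
def effective_weight(w: torch.Tensor, mask: torch.Tensor, bits: int = 4) -> torch.Tensor:
    return fake_quant_sym(w * mask, bits)
```

Swapping the two calls (quantizing first, masking second) gives the Q=>P ordering compared in Figure 7.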
+19 + diff --git a/MNFLT4oBgHgl3EQfMy8Y/content/tmp_files/load_file.txt b/MNFLT4oBgHgl3EQfMy8Y/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..adb4717099c07d7f3aba36f1ea5095c480d61495 --- /dev/null +++ b/MNFLT4oBgHgl3EQfMy8Y/content/tmp_files/load_file.txt @@ -0,0 +1,1705 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf,len=1704 +page_content='Understanding INT4 Quantization for Transformer Models: Latency Speedup, Composability, and Failure Cases Xiaoxia Wu∗, Cheng Li∗, Reza Yazdani Aminabadi Zhewei Yao, Yuxiong He Microsoft {xiaoxiawu, chengli1, yazdani.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='reza, zheweiyao, yuxhe}@microsoft.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='com Abstract Improving the deployment efficiency of transformer-based language models has been challenging given their high computation and memory cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' While INT8 quantization has recently been shown to be effective in reducing both the memory cost and latency while preserving model accuracy, it remains unclear whether we can leverage INT4 (which doubles peak hardware throughput) to achieve further latency improvement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In this work, we fully investigate the feasibility of using INT4 quantization for language models, and show that using INT4 introduces no or negligible accuracy degradation for encoder-only and encoder-decoder models, but causes a significant accuracy drop for decoder-only models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' To materialize the performance gain using INT4, we develop a highly-optimized end-to-end INT4 encoder inference pipeline supporting different quantization strategies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Our INT4 pipeline is 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5× faster for latency-oriented scenarios and up to 3× for throughput-oriented scenarios compared to the inference of FP16, and improves the SOTA BERT INT8 performance from FasterTransformer by up to 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7×.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' We also provide insights into the failure cases when applying INT4 to decoder-only models, and further explore the compatibility of INT4 quantization with other compression techniques, like pruning and layer reduction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' 1 Introduction As pre-trained large language models (LLMs) (Vaswani et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2017) such as BERT (Tenney et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2019), BART Lewis et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2020), and GPT (Radford et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2019) require a significant amount of GPU resources to deploy, compression becomes a common practice to optimize model inference, especially for resource- constrained environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' One of the widely used compression techniques is quantization where data are stored and manipulated in a lower-precision format, such as 8-bit or 4-bit integers instead of 32-bit or 16-bit floating-point numbers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' It not only reduces the amount of memory required to store the model, but also can leverage the higher GEMM computation throughput for lower-bit data types on supported GPUs (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', peak INT4 Tensor Core TFLOPS doubles that of INT8 and quadruples that of FP16) to improve inference latency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Note that only quantizing the model weights without computing in lower-bit data types (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', keeping activation in FP16 or FP32) introduces no latency improvement (or even slower due to type conversion at runtime) but only memory saving.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Recent work proposes techniques to apply INT8 quantization (using INT8 computation where both weight and activation are quantized, referred to as W8A8) to all linear layers without introducing accuracy degradation for transformers (Yao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Xiao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Dettmers et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2022a,b;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Li et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Yao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2022) also present an INT8 inference pipeline and show good end-to-end (E2E) performance improvement over FP16 model inference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' NVIDIA’s FasterTransformer (NVIDIA, 2023) holds SOTA open-source INT8 implementations where aggressive quantization are explored: mode-1 quantizes the ∗Equal Contribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Code will be released soon as a part of https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='com/microsoft/DeepSpeed 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='12017v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='CL] 27 Jan 2023 attention computation beyond linear layers, and mode-2 further quantizes the residual connection trading off accuracy for latency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' While we are advancing W8A8 quantization algorithms and implementations proven to be effective for LLMs, the questions arise: (1) whether INT4 inference (using INT4 computation where both activation and weight are quantized, referred to as W4A4) is feasible (acceptable accuracy drop) for these models, and (2) how it can be leveraged for performance improvement on real hardware.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Although W4A4 has been successfully applied to other model types or hardware, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', convolution models for image classification with quantization-aware training strategy (QAT) (Abdolrashidi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2021),1 there is lack of work on exploring W4A4 for LLMs inference on GPU.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Dettmers and Zettlemoyer (2022) show little accuracy loss for LLMs when only model weights are quantized to 4-bit with post-quantization training (PTQ)2, while the computation is still in FP16 as the activations are not quantized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2022) prove that even the binary network can result in only a small degradation if applying QAT with knowledge distillation (KD) (Hinton et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2014) and longer training, but the activations are quantized to INT8 (using INT8 computation, not INT4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Tang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2022) are the first to claim to apply W4A4 to BERT for inference with QAT and KD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' However, their quantization method fails to enable W4A4 for all but only the last two layers in a four-layer TinyBERT model (otherwise causing drastic accuracy drops).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Moreover, their E2E INT4 inference lacks implementation details, with conflicting performance numbers when compared to FasterTransformer (see Appendix C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In this work, we aim not only to better understand the accuracy impact of INT4 quantization on common LLMs, but also to materialize and maximize the benefit of using INT4 computation in E2E inference, further improving the SOTA inference performance on LLMs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Specifically, we make the following contributions: We explore the feasibility of W4A4 quantization across popular language model types, by leveraging the recent layer-wise knowledge distillation method for quantization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' We show that our W4A4 can achieve no accuracy loss for the encoder-only models (BERT) on classification problems, negligible accuracy difference for encoder-decoder models (BART) on summarization tasks, but causes a relatively larger accuracy drop for decoder-only models (GPT) on autoregressive generation tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' We develop a highly optimized end-to-end encoder model inference pipeline to support INT4 computa- tion.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' The pipeline is built with modular components supporting different quantization strategies to accommodate latency- or throughput-oriented scenarios.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Our inference pipeline is up to 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5×/3× faster for latency-/throughput-oriented scenarios when compared to HuggingFace FP16 BERT implementation, and improves the SOTA BERT INT8 performance from NVIDIA FasterTransformer by up to 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7×.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' To unveil the causes of larger accuracy drop for decoder-only models (GPT) when using INT4 quantiza- tion, we provide an in-depth analysis of layer normalization, pretraining effect, and attention mechanism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Additionally, we study the composability of INT4 quantization with other compression techniques, including pruning and layer-reduction, for encoder-related models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' We defer additional related work to Appendix A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' 2 Model Accuracy for INT4 Quantization 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='1 Quantization Algorithms and Training Quantization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' For completeness, we here explain the symmetric and asymmetric quantization algo- rithms (Yao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Suppose x ∈ Rd and xq ∈ Rd represent respectively a full-precision and a quantized vector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' The uniform symmetric mapping strategy from x and xint is x(sym) q = S � clamp(x/S;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' −2b−1, 2b−1 − 1) � , 1QAT requires the full training pipeline by quantizing the weight and activation during the forward process and updating the weights with gradients computed by straight through estimator (Bengio et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2013) or other methods.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' 2PTQ means the quantized model is arrived directly by mapping the weights from floating-point to low precision values without the full pipeline training (dataset and backward gradient).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' 2 where clamp restricts the value of its argument to a given range from −2b−1 to 2b−1 − 1, b is the number of bits used to represent the quantized value, ⌈·⌉ is the rounding operator, and S ∈ R is the scaling factor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' For example, S can be computed as the maximum of the absolute elements in the x vector, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', S = max (abs(x)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' On the other hand, the asymmetric mapping strategy can be expressed as x(asym) q = S � clamp((x − xzero1)/S;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' 0, 2b−1 − 1) � + xzero1, where xzero is used as a reference point potentially reducing any bias into the asymmetric vector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' The scalar S can be computed as S = max(x) − min(x) and xzero = min(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Throughout the paper, we always do both weight and activation quantization using the method proposed in Yao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' See Appendix B for more details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Knowledge Distillation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Knowledge distillation (KD) can greatly improve the performance of quantized transformer models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' It trains a smaller quantized model (the student model) by incorporating the knowledge from the larger full-precision model (the teacher model).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' This can be done by training the student model to mimic the behavior of the teacher model on the training dataset, using the output probabilities as a soft target (Hinton et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2014) and the hidden states (and/or attention maps) of each transformer layer to align feature maps (Jiao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Wang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Bai et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Li et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2016a;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Table 1: The best quality for BERT/BART/GPT-type models (two sizes) over the validation datasets, respectively with metric Accuracy (Acc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', higher is better), Rouge Lsum (RLsum, higher is better), and perplexity (PPL, lower is better).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Models BERT-base (110M) BART-base (140M) GPT2-base (117M) Tasks MNLI-m/mm QQP CNNDailyMail XSUM PTB WIKI-2 WIKI-103 Metrics Acc/Acc F1/Acc R1/R2/RLsum R1/R2/RL Perplexity Perplexity Perplexity FP32 (teacher) 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='20/84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='67 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='83/90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='95 45.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='62/22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='85/42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='87 42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='18/19.' 
…44/34.36 | 19.31 | 21.02 | 17.46
W4A4 (symmetric) | 84.31/84.48 | 88.11/91.14 | 44.63/21.42/41.92 | 41.54/18.61/33.69 | 22.17 | 27.28 | 21.75
W4A4 (asymmetric) | 84.29/84.65 | 88.17/91.19 | 44.83/21.67/42.08 | 41.53/18.56/33.62 | 21.72 | 25.99 | 21.54

Models | BERT-large (345M) | BART-large (406M) | GPT2-medium (355M)
Tasks | MNLI-m/mm | QQP | CNNDailyMail | XSUM | PTB | WIKI-2 | WIKI-103
Metrics | Acc/Acc | F1/Acc | R1/R2/RLsum | R1/R2/RL | Perplexity | Perplexity | Perplexity
FP32 (teacher) | 86.65/85.91 | 88.08/91.07 | 44.82/21.67/41.80 | 45.42/22.37/37.29 | 15.92 | 15.92 | 12.75
W4A4 (symmetric) | 86.25/86.20 | 88.30/91.17 | 45.12/21.73/42.31 | 44.39/21.28/36.33 | 17.69 | 19.51 | 14.57
W4A4 (asymmetric) | 86.49/86.28 | 88.35/91.24 | 45.20/21.85/42.40 | 44.91/21.74/36.79 | 17.32 | 18.74 | 14.23

2.2 INT4 Quantization for Language Models

We perform the 4-bit quantization on all linear layers using QAT and KD.
We use BERT-base and BERT-large (Tenney et al., 2019) as representatives for encoder-only models and fine-tune them on the two largest GLUE tasks, i.e., QQP (Iyer et al., 2017) and MNLI (Williams et al., 2017), for their small accuracy variations. We use GPT2 and GPT2-medium (Radford et al., 2019) as representatives for decoder-only models and fine-tune them on three causal generation tasks, i.e., PTB (Marcinkiewicz, 1994), Wikitext-2, and Wikitext-103 (Merity et al., 2017). Finally, we use BART-base and BART-large as representatives for encoder-decoder models and fine-tune them on two summarization tasks, i.e., CNNDailyMail (Hermann et al., 2015) and XSum (Narayan et al., 2018). In order to reduce the effect of hyper-parameters (e.g., the best quantization configuration for BERT may be suboptimal for GPT), we exhaustively search hyper-parameters including iterations, learning rate, dropout, quantization groups, clip values, and knowledge distillation terms for each model and choose the best one to report here.
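For illustration, the sketch below shows, in simplified PyTorch, what W4A4 fake quantization with QAT and a knowledge-distillation loss can look like; it is not the paper's implementation, and names such as fake_quant_sym, QuantLinear, and distillation_loss are hypothetical.

import torch
import torch.nn.functional as F

def fake_quant_sym(x, num_bits=4):
    # Symmetric uniform fake quantization with a straight-through estimator:
    # values are rounded to the signed INT4 grid [-8, 7] and dequantized back
    # to float, while gradients pass through the rounding unchanged.
    qmax = 2 ** (num_bits - 1) - 1
    scale = x.detach().abs().max().clamp(min=1e-8) / qmax
    y = torch.clamp(x / scale, -qmax - 1, qmax)
    y = (y.round() - y).detach() + y        # straight-through estimator
    return y * scale

class QuantLinear(torch.nn.Linear):
    # W4A4: fake-quantize both the weight and the input activation.
    def forward(self, x):
        return F.linear(fake_quant_sym(x), fake_quant_sym(self.weight), self.bias)

def distillation_loss(student_logits, teacher_logits, temperature=1.0):
    # KL divergence between the softened teacher and student distributions.
    return F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)

In such a setup the fake-quantized student is trained against the FP32 teacher's outputs, while the straight-through estimator keeps the rounding step differentiable in practice.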
We include the experimental details in Appendix C and Table C.1. We present the main results in Table 1 for both symmetric and asymmetric quantization. We also provide more detailed iteration-vs-accuracy plots on the validation datasets for QAT in Figure C.1. For symmetric quantization, as can be seen, there is no accuracy degradation for BERT models and only negligible drops (≤ 1 point) for BART models, while the 4-bit decoder models, i.e., GPT2 and GPT2-medium, show a significant drop in perplexity (≥ 1.5 points) compared to the original FP32 models. This suggests that classification/summarization tasks using encoder-only/encoder-decoder models are much more robust to quantization than auto-regressive generation tasks using decoder-only models.

Figure 1: CUTLASS INT4 vs. INT8 GEMM performance comparison across different batch size × sequence length (M) for BERT-base and BERT-large GEMM shapes (N and K). We use the best GEMM schedule for different inputs, identified with the CUTLASS profiler. The left axis shows the achieved throughput (the peak INT8 and INT4 Tensor Core throughput is 309.7 and 619.3 TOPS on an A6000 GPU) and the right axis shows the speedup of INT4 over INT8.
Asymmetric quantization generally improves accuracy over symmetric quantization since it better utilizes the quantization range. Notably, even with the better quantization scheme (i.e., asymmetric quantization) and exhaustive hyper-parameter tuning, decoder-only models still show larger quality degradation than encoder-only and encoder-decoder models. To provide more insight into why decoder-only models are more sensitive to INT4 quantization, we give a detailed analysis in Section 4.
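The two schemes can be contrasted with the following simplified sketch (illustrative, not the paper's exact quantizer): the asymmetric variant derives a scale and a zero-point from the observed minimum and maximum, so a skewed activation distribution can occupy all 16 INT4 levels.

import torch

def quant_dequant_sym(x, num_bits=4):
    # Symmetric: a single scale, zero-point fixed at 0, INT4 range [-8, 7].
    qmin, qmax = -(2 ** (num_bits - 1)), 2 ** (num_bits - 1) - 1
    scale = x.abs().max().clamp(min=1e-8) / qmax
    q = torch.clamp(torch.round(x / scale), qmin, qmax)
    return q * scale

def quant_dequant_asym(x, num_bits=4):
    # Asymmetric: scale and zero-point come from the min/max of x, so skewed
    # distributions (e.g. post-GELU activations) use all 16 levels.
    qmin, qmax = 0, 2 ** num_bits - 1
    x_min, x_max = x.min(), x.max()
    scale = (x_max - x_min).clamp(min=1e-8) / (qmax - qmin)
    zero_point = torch.round(-x_min / scale)
    q = torch.clamp(torch.round(x / scale) + zero_point, qmin, qmax)
    return (q - zero_point) * scale

x = torch.nn.functional.gelu(torch.randn(4096))      # skewed example input
err_sym = (x - quant_dequant_sym(x)).abs().mean()
err_asym = (x - quant_dequant_asym(x)).abs().mean()  # typically the smaller error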
3 Highly Optimized INT4 Encoder Inference

To materialize and maximize the benefits of using INT4 computation in model inference, we develop a set of custom GPU kernels and an E2E highly optimized pipeline to support inference with INT4 (as well as INT8) quantized encoder models. We adopt the system optimizations described in Yao et al. (2022) and Aminabadi et al. (2022) when applicable, and take advantage of FlashAttention (Dao et al., 2022) and CUDA graphs (NVIDIA, 2021) to further improve performance. Moreover, we explore different quantization strategies for latency- or throughput-oriented scenarios. The software design and implementation also largely apply to other model types, e.g., GPT decoders, if the accuracy drop can be resolved.

We conduct the performance experiments on a Lambda A6000 workstation (Lambda, 2023) (2× A6000 48GB GPUs, 256GB DRAM, and 2TB NVMe) with the following software setup: HuggingFace Transformers 4.25.1, NVIDIA FasterTransformer v5.2.1, PyTorch 1.12.1, CUDA 11.7, and CUTLASS v2.6.0. Currently, INT4 GEMM is not supported by cuBLAS and is only available through CUTLASS (NVIDIA, 2017), so we use the latter to support the INT4 computation in model inference.

3.1 INT4 GEMM

INT4 Tensor Core performance (peak throughput) theoretically doubles INT8 throughput on supported NVIDIA GPUs. However, to achieve the 2× speedup, the GEMM input shapes have to be large enough (i.e., compute-intensive). The linear layers that are quantized and computed with INT4 data in the encoder model inference are the QKV projection, attention output, MLP intermediate, and MLP output GEMMs. The GEMM shapes (M-N-K) for these layers are (bs×seq, 3h, h), (bs×seq, h, h), (bs×seq, 4h, h) and (bs×seq, h, 4h), respectively, where bs and seq are the input batch size and sequence length, and h is the model hidden dimension. These shapes set the upper-bound performance improvement we can achieve with INT4 over INT8 GEMM for a given model.
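To make the shape notation concrete, the small helper below (illustrative only) enumerates the M-N-K shapes of the four quantized GEMMs for a given batch size, sequence length, and hidden dimension; for BERT-large (h = 1024) with bs × seq = 32 × 384 = 12288 it yields shapes such as 12288-3072-1024 (QKV projection) and 12288-1024-4096 (MLP output).

def encoder_gemm_shapes(bs, seq, h):
    # M-N-K shapes of the four quantized linear layers in one encoder block.
    m = bs * seq
    return {
        "qkv_projection":   (m, 3 * h, h),
        "attention_output": (m, h, h),
        "mlp_intermediate": (m, 4 * h, h),
        "mlp_output":       (m, h, 4 * h),
    }

# Example: BERT-large (h = 1024) with batch size 32 and sequence length 384.
for name, (m, n, k) in encoder_gemm_shapes(32, 384, 1024).items():
    print(f"{name}: {m}-{n}-{k}")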
Figure 1 shows the performance comparison between INT4 and INT8 GEMM for common shapes in the BERT-base and BERT-large models. We can see that the larger the input shape, the higher the speedup. While the INT4 GEMM speedups for BERT-large are overall higher than for BERT-base because the model hidden dimension is larger (1024 vs. 768), within a model the four GEMMs can achieve very different INT4 speedups given the same input, i.e., the same bs × seq. For example, with bs × seq = 12288 for BERT-large, the attention output GEMM (12288-h-h) only achieves a 1.46× speedup while the MLP output GEMM (12288-h-4h) achieves 1.96× when using INT4 over INT8 computation.
Combining this with the quantization/dequantization overhead (see Section 3.2), this difference suggests the need for tunable quantization strategies (enabling/disabling quantization on certain GEMM parts) depending on the input shape.

3.2 Holistic Optimizations of End-to-end Inference

Figure 2: E2E latency speedup of (a) our INT4 over INT8 with all four parts quantized (i4-qall and i8-qall), and (b) our INT4 with the best quantization strategy (i4-qbest) over FasterTransformer INT8 (FT-i8), on A6000.

Figure 3: E2E latency speedup of FasterTransformer INT8 (FT-i8), our INT8 with all four parts quantized (i8-qall), and our INT4 with the best quantization strategy (i4-qbest) over HuggingFace FP16 (HF-fp16) inference.

While INT4 computation improves the performance of the linear layers, the other major components in between use FP16 data types (e.g., layer normalization, elementwise operations, etc.). E2E inference therefore requires quantizing/dequantizing the activations before/after the lower-bit GEMM operations. Moreover, the improvement from INT4 and the quantization/dequantization overhead are both model- and input-dependent. Depending on the deployment scenario (latency- or throughput-oriented), the optimal quantization strategies can be different. Thus, maximizing the gain from using INT4 computation requires holistic optimizations of the E2E model inference.

The quantization/dequantization of activations consists of memory-bound operations and introduces nontrivial overhead.
Similar to Yao et al. (2022), we fuse the quantization operation for an activation with its preceding element-wise bias-add, GELU, or layer normalization operation into a single GPU kernel, and fuse the dequantization operation with the INT4 GEMM kernel to avoid extra data movement to global GPU memory. Since the current PyTorch does not yet support an INT4 tensor data type, we pack INT4 data into INT8 tensors when invoking our customized kernels.
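Because PyTorch has no native 4-bit tensor type, two INT4 values are typically stored per INT8 byte; the host-side sketch below is a simplified illustration of such packing and unpacking (the actual CUDA kernels are not shown here).

import torch

def pack_int4(q):
    # q: signed INT4 values in [-8, 7] with an even number of elements.
    # Two 4-bit values are stored per byte: low nibble first, high nibble second.
    q = q.to(torch.uint8) & 0xF               # two's-complement nibble
    return (q[0::2] | (q[1::2] << 4)).to(torch.uint8)

def unpack_int4(packed):
    lo = (packed & 0xF).to(torch.int8)
    hi = (packed >> 4).to(torch.int8)
    # Sign-extend the 4-bit nibbles back to signed INT8.
    lo = torch.where(lo > 7, lo - 16, lo)
    hi = torch.where(hi > 7, hi - 16, hi)
    out = torch.empty(packed.numel() * 2, dtype=torch.int8)
    out[0::2], out[1::2] = lo, hi
    return out

q = torch.randint(-8, 8, (16,), dtype=torch.int8)
assert torch.equal(unpack_int4(pack_int4(q)), q)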
FlashAttention (Dao et al., 2022) has been shown to largely improve attention computation performance, especially for large batch sizes and sequence lengths. We integrate FlashAttention into our inference pipeline to speed up the attention computation (in FP16). CUDA graphs (NVIDIA, 2021) were introduced by NVIDIA to reduce GPU kernel launch overhead. For small batch sizes and short sequence lengths, the kernel launch overhead is non-negligible, so we enable CUDA graphs in our inference pipeline to minimize such overhead.

A model deployment scenario can be either latency-sensitive or throughput-oriented, so different batch sizes and sequence lengths are used for different cases. As shown in Section 3.1, the gain from INT4 is input-dependent (the input decides the GEMM shapes). The memory-bound quantization/dequantization operations introduce input-dependent (i.e., depending on the size of the activations) overhead as well.
Due to the varying model sizes (particularly the hidden dimension h), input shapes, and hardware, the four quantized linear layers have different trade-offs between gain and overhead. For example, for low bs × seq inference with BERT models, quantization of the QKV projection, attention output, and MLP output might not result in an E2E performance improvement. If so, we can skip the quantization of these three parts in inference (note that using a higher-bit computation data type for a QAT model does not degrade the inference accuracy). As such, we develop the four model parts as modular components whose quantization can be enabled or disabled separately in the inference pipeline. Different quantization strategies can be applied given a target scenario and hardware. Also, the GEMM schedules used in inference are pre-tuned (with the CUTLASS profiler) for the best performance in the deployment environment.
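One way such a tunable strategy could be expressed is sketched below (the names QuantStrategy, Q_ALL, Q3, and the benchmark_fn callback are hypothetical, not the actual pipeline API): each of the four GEMM parts can independently run in INT4 or stay in FP16, and the fastest combination is picked for a target batch size and sequence length.

from dataclasses import dataclass
from itertools import product

@dataclass
class QuantStrategy:
    # Which of the four linear layers run their GEMM in INT4 (True) vs. FP16.
    qkv_projection: bool = True
    attention_output: bool = True
    mlp_intermediate: bool = True
    mlp_output: bool = True

# "qall": quantize everything; "q3": only the MLP intermediate GEMM.
Q_ALL = QuantStrategy()
Q3 = QuantStrategy(qkv_projection=False, attention_output=False, mlp_output=False)

def best_strategy(benchmark_fn, bs, seq):
    # benchmark_fn(strategy, bs, seq) -> measured E2E latency (user-supplied).
    candidates = [QuantStrategy(*flags) for flags in product([True, False], repeat=4)]
    return min(candidates, key=lambda s: benchmark_fn(s, bs, seq))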
3.3 End-to-end Inference Performance Results

We measure the E2E BERT model INT4 (prefixed with i4-) and INT8 (prefixed with i8-) latency with our inference pipeline and compare it with the HuggingFace FP16 implementation (denoted HF-fp16) as well as the SOTA INT8 implementation (denoted FT-i8) from NVIDIA FasterTransformer (NVIDIA, 2023). The input batch sizes and sequence lengths are selected to cover both latency- and throughput-oriented scenarios. We explore different quantization strategies with the inference pipeline (the suffix in a configuration name notes what is quantized) and show the effectiveness of such tuning. We use symmetric quantization for the BERT models in these experiments, as the earlier section shows no accuracy drop and it is faster than asymmetric quantization because less computation is required for the bias term.

Figure 2a shows the E2E speedup of our INT4 over our INT8 inference when quantizing all four parts. Cross-comparing it with Figure 1, which indicates the upper bound of the E2E INT4-vs-INT8 speedup, we can see that the inference pipeline design achieves the goal of maximizing the performance gain from using INT4 computation. Figure 2b compares our best INT4 inference with the FasterTransformer INT8 inference (using mode-1, as mode-2 trades off accuracy for better latency). Note that other than the four parts we quantize in our pipeline, FasterTransformer INT8 also quantizes the attention computation, while we use FP16 FlashAttention (see Section 3.2). As annotated, the best quantization strategy for (bs-seq) of (1-32), (1-128) and (8-32) is to quantize only the MLP intermediate GEMM (q3). For larger batch sizes and sequence lengths, the best configuration is to quantize all four parts. We show that our highly optimized INT4 inference improves the SOTA BERT model performance by up to 1.7× compared to FT-INT8 while maintaining model quality. Figure 3 presents the speedup of our inference and FasterTransformer pipelines over HuggingFace FP16 inference, a common baseline for comparison. Our INT4 inference is up to 8.5× faster for latency-oriented scenarios and up to 3× faster for throughput-oriented scenarios. Note that we focus on maximizing the performance gain from using INT4 computation in this work, so orthogonal optimizations from FasterTransformer (e.g., padding removal) or other work are applicable to our INT4 inference design and can further improve the inference performance.
4 Failure Cases: Understanding the Quality Degradation of INT4 Decoder Models

For W4A4 GPT models, we have made heavy efforts to tune and distill them, but their results are still far away from their FP32 counterparts. In this section, we present several analyses of the possible causes of such degradation. (1) Layer Normalization (LN). The position of LN is different for encoder and decoder models: LN for BERT and BART happens after each sublayer's residual connection ("Post-LN") (Vaswani et al., 2017), while LN for GPT models operates at the beginning of each sublayer, before adding to the residual values ("Pre-LN") (Xiong et al., 2020); the two placements are sketched right after this list. Compared to Pre-LN, Post-LN removes the mean and variance shift caused by the residual connection and activation functions, which might make the network more robust. A possible conjecture is that the good quality of INT4 BERT/BART is due to the effect of Post-LN, which makes the models less sensitive to quantization. (2) Pretraining Effect. The activation range for decoder models can vary significantly across layers and across linear modules. A possible conjecture is that pretraining on a large-scale dataset, such as billions of examples, may exacerbate this issue by introducing more diversity in the input activations, which could lead to less optimal quantization performance.
(3) Attention Mechanism. GPT models use a causal self-attention mechanism to weight the importance of each word in the input and generate tokens in a sequential manner (autoregressive generation), while BART uses an encoder-decoder attention mechanism in addition to causal self-attention. As such, for the first few generated tokens, BART can still gather information from the encoder-decoder attention, which can potentially reduce the quantization error by averaging attention information, while GPT does not have this ability.
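Regarding conjecture (1), the structural difference can be summarized with the following simplified sketch of a single sublayer (dropout, multi-head details, and the attention path omitted; module names are illustrative): Post-LN normalizes after the residual addition, while Pre-LN normalizes only the sublayer input.

import torch

def post_ln_sublayer(x, sublayer, norm):
    # Post-LN (BERT/BART): LayerNorm is applied after the residual addition,
    # removing the mean/variance shift introduced by the residual stream.
    return norm(x + sublayer(x))

def pre_ln_sublayer(x, sublayer, norm):
    # Pre-LN (GPT2): LayerNorm is applied only to the sublayer input; the
    # residual stream itself is never re-normalized.
    return x + sublayer(norm(x))

h = 768
norm = torch.nn.LayerNorm(h)
mlp = torch.nn.Sequential(
    torch.nn.Linear(h, 4 * h), torch.nn.GELU(), torch.nn.Linear(4 * h, h)
)
x = torch.randn(2, 16, h)
y_post = post_ln_sublayer(x, mlp, norm)
y_pre = pre_ln_sublayer(x, mlp, norm)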
Figure 4: The quality gaps between the W4A4 and FP32 models (perplexity gap, teacher minus W4A4, plotted against training iterations) for GPT2-Base and GPT2-Medium, respectively for GPT2-PreLN (blue) and GPT2-PostLN (orange).

Layer Normalization. To understand whether Pre-LN and Post-LN lead to a significant difference in quantization, we design the following experiments: (1) As GPT2 by default uses Pre-LN (GPT2-PreLN), we construct a model (GPT2-PostLN) by replacing the pre-LN with post-LN. In order to have a fair comparison between the quantization results of GPT2-PreLN and GPT2-PostLN, we directly fine-tune both models on Wikitext-103 from scratch; the perplexities are 17.88 (PreLN) and 18.95 (PostLN) for GPT2-Medium, and 18.76 (PreLN) and 19.46 (PostLN) for GPT2-base. (Compared to Wikitext-2 and PTB, Wikitext-103 is a considerably larger dataset and thus arrives at a low perplexity even when training from scratch, closer to the results of the pretrained models.) (2) We take the above FP32 checkpoints and apply QAT with KD to obtain the best W4A4 models. The perplexities for W4A4 are 18.66 (PreLN) and 19.79 (PostLN) for GPT2-Medium, and 20.46 (PreLN) and 21.73 (PostLN) for GPT2-base. We then calculate the perplexity gaps between the W4A4 and FP32 models. We report the two perplexity-gap curves in Figure 4, depicted by the blue curve for GPT2-PreLN and the orange curve for GPT2-PostLN. The overlap of the two curves at the end of training demonstrates that the LN position may not directly affect the performance degradation of decoder-only models.

Pretraining Effect. Despite obtaining negative results on the position of layer normalization, we have identified an intriguing observation with regard to models trained from scratch. Our experiments reveal that the gap between the student and teacher models in terms of perplexity (PPL) is smaller when training from scratch (20.46 ppl and 18.76 ppl for INT4 and FP32, respectively) as compared to utilizing a pretrained GPT2 model (21.54 ppl for INT4 and 17.46 for FP32). This observation raises questions about the potential negative effect of pretraining in the context of quantization, as the model trained from scratch appears to perform better.
Figure 5: The gaps between the minimum and maximum activations at certain layers (Layers 1, 5, 9, and 12) over the hidden dimension (3072) of the second fully-connected linear module (FC2) of GPT2, comparing the model fine-tuned from the pretrained checkpoint with the model trained from scratch. The gaps are plotted with respect to position, and the average is taken over 8 batch sizes, with a one-standard-deviation shaded region.

To understand this, we compare the position-wise activation range between the models fine-tuned from the pretrained checkpoint and from scratch (referred to as "positional activation"). This provides a token-level understanding of the quantization range. The results are shown in Figure 5, and they reveal the higher positional-activation range of the pretrained model as compared to the scratch-trained model. This further supports the hypothesis that pretraining on large diverse datasets may lead to a wider range of activation values, and thus may be suboptimal for quantization as compared to models trained from scratch.
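A sketch of how such a position-wise range could be collected is given below (illustrative only; the function, hook, and tensor names are assumptions, not the authors' tooling): a forward hook records, for every token position, the gap between the maximum and minimum activation entering a chosen layer such as FC2.

import torch

@torch.no_grad()
def positional_activation_range(model, layer, input_ids):
    # Records max(x) - min(x) over the hidden dimension for every token
    # position of the input reaching `layer` (e.g. the second MLP linear, FC2),
    # averaged over the batch.
    result = {}

    def hook(module, inputs, output):
        x = inputs[0]                                  # (batch, seq_len, hidden)
        gap = x.max(dim=-1).values - x.min(dim=-1).values
        result["gap"] = gap.mean(dim=0)                # -> (seq_len,)

    handle = layer.register_forward_hook(hook)
    model(input_ids)
    handle.remove()
    return result["gap"]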
Attention Mechanism. To gain insight into the impact of different attention mechanisms (encoder-decoder attention and causal self-attention) on quantization errors, we conduct a comparison of the BART-large and GPT2-medium models. We evaluate the "positional perplexity" of both the FP32 and W4A4 models on the CNNDailyMail dataset for BART and the Wikitext-2 dataset for GPT. The results are depicted in Figure 6.

Figure 6: The positional perplexity across the full sequence for the BART (summarization, CNNDailyMail) and GPT2 (generation, Wikitext-2) models, comparing the teacher (FP32) and student (W4A4).
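Positional perplexity can be computed, for instance, as the exponential of the batch-averaged token loss at each position; the sketch below is one illustrative way to obtain such a curve (not necessarily the exact evaluation script used for Figure 6).

import torch
import torch.nn.functional as F

@torch.no_grad()
def positional_perplexity(logits, labels):
    # logits: (batch, seq_len, vocab); labels: (batch, seq_len).
    # For a causal LM, logits and labels are assumed to be shifted already.
    # Returns a (seq_len,) tensor: exp of the batch-averaged loss per position.
    loss = F.cross_entropy(logits.transpose(1, 2), labels, reduction="none")
    return loss.mean(dim=0).exp()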
We make the following observations: (1) The curves for GPT, whether for the teacher or the student model, tend to exhibit a downward trend. The token losses at early positions are significantly higher than those at later positions. Conversely, the curves for both the teacher and student models of BART exhibit a mild upward trend, with token losses at later positions being no better than those at earlier positions. (2) The perplexity degradation from quantization for the BART model is small, with a maximum gap of 2.5 ppl at the end of the sequence. In contrast, the GPT model experiences a large accuracy loss from quantization, with a maximum gap of over 100 ppl at the first tens of tokens of the sequence and a gap of around 2 ppl at later positions. Both phenomena highlight the importance of the additional encoder-decoder attention mechanism. For causal-self-attention-only models (i.e., GPT), the next generated token can only use information from previous words. As such, (1) the earlier positions have less information to retrieve, which leads to larger ppl scores; and (2) the INT4 model has significant perplexity degradation at the beginning positions compared to the FP32 model due to the information noise from quantization. Thanks to the encoder-decoder attention, the INT4 BART model has (1) relatively stable perplexity for all positions and (2) consistent positional perplexity degradation as compared to its FP32 counterpart.

5 Composability of INT4 Quantization

In this section, we examine the composability of W4A4 to identify techniques that can be used to further accelerate INT4 inference. Specifically, we investigate the potential of combining INT4 quantization with other compression techniques, such as pruning and layer reduction. Our study is based on the observation that encoder-related models, such as BERT and BART, demonstrate robustness to W4A4 compression, as shown in Table 1.
5.1 Composing Semi-structured Pruning with INT4

We focus on combining semi-structured pruning with W4A4. Specifically, we investigate the semi-structured sparsity called Pair-(N:M), which allows for accelerated execution on NVIDIA Ampere GPUs (Mishra et al., 2021; Holmes et al., 2022). A Pair-(N:M) sparsity structure means that there are N zero entries in every M elements. We take BERT-base as an example, as Quantization-Aware Training with Knowledge Distillation for W4A4 models has been shown to lead to better accuracy than its FP32 counterpart. We follow the training recipe described in Wu et al. (2022).
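As an illustration of the Pair-(N:M) pattern, the sketch below builds a magnitude-based 2:4 mask for a weight matrix: within every group of four consecutive weights along the input dimension, the two smallest-magnitude entries are zeroed. This is only a minimal example of the sparsity pattern itself; the pruning and fine-tuning recipe actually used follows Wu et al. (2022) and is not reproduced here.

    import torch

    def pair_nm_mask(weight: torch.Tensor, n: int = 2, m: int = 4) -> torch.Tensor:
        """Magnitude-based Pair-(N:M) mask: in each group of M consecutive weights,
        keep the (M - N) largest-magnitude entries and zero the remaining N."""
        out_features, in_features = weight.shape
        assert in_features % m == 0, "input dimension must be divisible by M"
        groups = weight.abs().reshape(out_features, in_features // m, m)
        keep = groups.topk(m - n, dim=-1).indices        # entries to keep in each group
        mask = torch.zeros_like(groups)
        mask.scatter_(-1, keep, 1.0)
        return mask.reshape(out_features, in_features)

    w = torch.randn(8, 16)
    mask = pair_nm_mask(w, n=2, m=4)
    sparse_w = w * mask
    print(mask.mean())   # ~0.5, i.e. two of every four entries are zeroed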
[Figure 7: The validation accuracy (Val. Acc.) on MNLI-m of the W4A4 + 50% sparsity (i.e., Pair-(2:4)) BERT-base, plotted against training iterations. We compare the order of pruning and quantization: Q=>P (orange solid curve) means the quantization algorithm is in front of the pruning algorithm; P=>Q (blue dashed curve) is the opposite. From left to right, the plots differ only in the number of training epochs. Best accuracy (MNLI-m/-mm): 3 epochs, Q=>P 84.18/84.54 vs. P=>Q 84.31/84.79; 18 epochs, Q=>P 84.33/84.89 vs. P=>Q 84.56/85.04; 21 epochs, Q=>P 84.60/84.76 vs. P=>Q 84.51/85.04.]

Algorithm Design Order between Pruning and INT4.
When combining the two compression techniques, pruning and quantization, a natural question is their ordering in the forward pass: should pruning be applied before quantization (i.e., Quant(Prune(W)), denoted P=>Q), or quantization before pruning (i.e., Prune(Quant(W)), denoted Q=>P)? To understand this, we fine-tune on MNLI for different numbers of training epochs using the simplest ℓ1 pruning method (Han et al., 2015, 2016). ℓ1 pruning sets the weights with small absolute values to zero while keeping the large-magnitude weights untouched. The ℓ1 pruning mask is determined by the absolute values of the weight matrices of the teacher model and remains fixed throughout training. We plot the accuracy on the validation set in Figure 7. As can be seen, for shorter training times P=>Q is better than Q=>P. However, the benefit of P=>Q starts to diminish as we increase the number of training epochs. Overall, it is generally recommended to perform pruning before quantization, because pruning removes unnecessary weights from the model; as such, it can help mitigate the loss of precision caused by quantization and make the quantization process more effective.
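The two orderings differ only in where the fixed pruning mask is applied relative to the quantizer in the forward pass. The sketch below illustrates them on a single weight matrix; the per-tensor symmetric fake quantizer and the magnitude (ℓ1) mask are simplified stand-ins for the actual W4A4 quantizer and pruning setup described above.

    import torch

    def fake_quant_int4(w: torch.Tensor) -> torch.Tensor:
        """Simulated symmetric per-tensor INT4 quantization (quantize, then dequantize)."""
        scale = w.abs().max() / 7.0
        return torch.clamp(torch.round(w / scale), -8, 7) * scale

    def l1_mask(w: torch.Tensor, sparsity: float = 0.5) -> torch.Tensor:
        """Magnitude (l1) mask: zero the smallest-|w| entries."""
        k = int(w.numel() * sparsity)
        threshold = w.abs().flatten().kthvalue(k).values
        return (w.abs() > threshold).float()

    teacher_w = torch.randn(768, 768)
    mask = l1_mask(teacher_w)                        # computed once from the teacher, then kept fixed

    w_p_then_q = fake_quant_int4(teacher_w * mask)   # P=>Q: Quant(Prune(W))
    w_q_then_p = fake_quant_int4(teacher_w) * mask   # Q=>P: Prune(Quant(W))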
With the decision to use the pruning-then-quantization order, we trained an INT4 BERT-base model with both 50% and 75% sparsity and report the best validation results in Table 2. We find that a 75% sparsity level results in an accuracy drop of 0.79/1.6 on the MNLI-m/-mm tasks. Therefore, if maintaining high accuracy is a priority, using a 50% sparsity level for W4A4 models is recommended. In the appendix, we also present the results of applying 50% sparsity to W4A4 models on 8 GLUE tasks and confirm that the average GLUE score is similar to that of the original FP32 models.

Table 2: Quantization (Q) and Pruning (P) for 50% or 75% sparsity.

Tasks       | Teacher (FP32) | Epoch-3 (Q only) | Epoch-21: P+Q, 50% sparsity | Epoch-21: P+Q, 75% sparsity
MNLI-m/-mm  | 84.9/85.6      | 84.8/85.2        | 84.56/85.04                 | 84.11/83.99

5.2 Composing Layer-reduction with Quantization

Reducing the depth of a model, also known as layer reduction, is a more straightforward method to improve inference latency, as it requires no modifications to the single-layer implementation (e.g., the GPU kernels). However, it should be noted that these layer-reduced models may not be able to capture the same level of complexity or learn the same representations as the original models.
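In practice, layer reduction amounts to keeping only a subset of the encoder and decoder blocks of a pretrained checkpoint before fine-tuning. The sketch below shows one simple way to do this for a HuggingFace BART-base checkpoint; keeping a prefix of layers is an assumption made for illustration, and the layer-selection and distillation recipe actually used follows Li et al. (2022).

    import torch.nn as nn
    from transformers import BartForConditionalGeneration

    def reduce_layers(model, n_enc: int, n_dec: int):
        """Keep only the first n_enc encoder layers and the first n_dec decoder layers."""
        model.model.encoder.layers = nn.ModuleList(list(model.model.encoder.layers)[:n_enc])
        model.model.decoder.layers = nn.ModuleList(list(model.model.decoder.layers)[:n_dec])
        model.config.encoder_layers = n_enc
        model.config.decoder_layers = n_dec
        return model

    # Example: a 6-encoder / 3-decoder variant of the 6+6 BART-base checkpoint,
    # which would then be fine-tuned (and quantized) on the downstream task.
    model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
    model = reduce_layers(model, n_enc=6, n_dec=3)
    print(len(model.model.encoder.layers), len(model.model.decoder.layers))   # 6 3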
To understand the compatibility of layer reduction and quantization, as well as the trade-off between model depth and quality, we perform a detailed study on an encoder-decoder model. Our implementation of the layer-reduction strategies and fine-tuning recipes follows the work of Li et al. (2022).⁴ However, there are two key differences: (1) our quantization algorithm, described in Section 2.1, differs from the one used in Li et al. (2022);⁵ (2) while Li et al. (2022) use 8-bit activations, we train our models with 4-bit activations.

More Encoder or More Decoder? When applying layer reduction to an encoder-decoder model, we need to decide the number of encoder and decoder layers. For example, when the depth is fixed at four layers, should we have more encoder layers (3 encoder and 1 decoder), more decoder layers (1 encoder and 3 decoder), or an equal number of layers for both (2 encoder and 2 decoder)? We investigate different scenarios of x encoder and y decoder layers, where x + y ∈ {9, 7, 4}; for example, x ∈ {6, 5, 4, 3} for the case x + y = 9 and x ∈ {3, 2, 1} for the case x + y = 4 (the full set of configurations appears in Table 3). We train our models for 10 epochs with a fixed random seed and a learning rate of 5e-5. The results are reported in Table 3.
A comparison of the results within the same depth (i.e., 9, 7, and 4 layers) reveals that it is beneficial to have more encoder layers than decoder layers, and that the number of decoder layers should be greater than one. Particularly, our experiments demonstrate that the performance of a 9-layer W4A4 BART model (with 6 encoder layers and 3 decoder layers) can be maintained at an acceptable level, which is only 0.6 lower than the 12-layer INT4 model on the CNNDailyMail dataset. This represents a potential latency improvement of about 50% for the decoder part while experiencing a minor accuracy drop.

Table 3: The W4A4 model with layer reduction. For reference, the original W4A4 BART-base (6 encoder, 6 decoder) achieves 44.83/21.67/42.08 on CNNDailyMail and 41.53/18.56/33.62 on XSUM.

Encoder (Decoder) | Six (Three)       | Five (Four)       | Four (Five)       | Three (Six)       | Four (Four)
CNNDailyMail      | 44.23/21.07/41.58 | 44.15/21.02/41.45 | 43.96/20.9/41.26  | 43.59/20.44/40.82 | 43.77/20.61/41.09
XSUM              | 40.61/17.83/32.90 | 40.30/17.53/32.61 | 40.18/17.47/32.43 | 39.30/16.76/31.64 | 39.75/17.10/32.06

Encoder (Decoder) | Six (One)         | Five (Two)        | Four (Three)      | Three (Four)      | Three (Three)
CNNDailyMail      | 42.48/19.83/40.13 | 43.55/20.56/40.99 | 43.48/20.38/40.85 | 43.29/20.14/40.54 | 42.90/19.85/40.21
XSUM              | 38.23/16.45/31.49 | 39.52/17.03/31.98 | 39.43/16.89/31.80 | 38.86/16.46/31.31 | 38.22/15.91/30.63

Encoder (Decoder) | Three (One)       | Two (Two)         | One (Three)
CNNDailyMail      | 41.26/18.58/38.93 | 41.83/18.82/39.20 | 41.40/18.39/38.68
XSUM              | 35.88/14.71/29.34 | 36.09/14.39/29.01 | 34.69/13.22/27.64
Summary. We demonstrate that it is possible to combine INT4 quantization with other compression techniques: composing INT4 with 50% Ampere-structured sparsity costs around 0.5 GLUE points, and composing INT4 with 25% layer reduction causes little degradation on summarization tasks. Fully investigating the composability of quantization with other methods is beyond the scope of this paper, and we leave it as future work.

6 Conclusions

Improving the inference efficiency of language models has become increasingly critical given their growing adoption but high compute resource requirements. While quantization techniques enabling INT8 computation on these language models have been well explored recently, how to fully unlock the INT4 computation power of GPUs is an emerging and unanswered question. In this work, we thoroughly investigate the feasibility of applying INT4 quantization to language models, and our INT4 encoder inference pipeline shows an up to 1.7× latency improvement over SOTA INT8 inference. We also provide an in-depth analysis of the accuracy drop for decoder models when using INT4 quantization, and study the composability of INT4 quantization for encoder-related models with other compression techniques.

⁴ https://github.com/amazon-science/dq-bart
⁵ We did a comparison of the two quantization algorithms and found that the algorithm for INT4 presented in Section 2.1 has better accuracy than that in Li et al. (2022).

References

Abdolrashidi, A., Wang, L., Agrawal, S., Malmaud, J., Rybakov, O., Leichner, C., and Lew, L. (2021). Pareto-optimal quantized resnet is mostly 4-bit. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3091–3099.
Aminabadi, R. Y., Rajbhandari, S., Zhang, M., Awan, A. A., Li, C., Li, D., Zheng, E., Rasley, J., Smith, S., Ruwase, O., et al. (2022). DeepSpeed inference: Enabling efficient inference of transformer models at unprecedented scale. arXiv preprint arXiv:2207.00032.

Bai, H., Zhang, W., Hou, L., Shang, L., Jin, J., Jiang, X., Liu, Q., Lyu, M., and King, I. (2020). BinaryBERT: Pushing the limit of BERT quantization. arXiv preprint arXiv:2012.15701.

Bengio, Y., Léonard, N., and Courville, A. (2013). Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432.

Cer, D., Diab, M., Agirre, E., Lopez-Gazpio, I., and Specia, L. (2017). SemEval-2017 task 1: Semantic textual similarity multilingual and cross-lingual focused evaluation. arXiv preprint arXiv:1708.00055.

Chung, I., Kim, B., Choi, Y., Kwon, S. J., Jeon, Y., Park, B., Kim, S., and Lee, D. (2020). Extremely low bit transformer quantization for on-device neural machine translation. arXiv preprint arXiv:2009.07453.

Dagan, I., Roth, D., Sammons, M., and Zanzotto, F. M. (2013). Recognizing textual entailment: Models and applications. Synthesis Lectures on Human Language Technologies, 6(4):1–220.

Dao, T., Fu, D. Y., Ermon, S., Rudra, A., and Ré, C. (2022). FlashAttention: Fast and memory-efficient exact attention with IO-awareness. arXiv preprint arXiv:2205.14135.

Dehghani, M., Gouws, S., Vinyals, O., Uszkoreit, J., and Kaiser, Ł. (2018). Universal transformers. arXiv preprint arXiv:1807.03819.

Dettmers, T., Lewis, M., Belkada, Y., and Zettlemoyer, L. (2022a). GPT3.int8(): 8-bit matrix multiplication for transformers at scale. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.

Dettmers, T., Lewis, M., Belkada, Y., and Zettlemoyer, L. (2022b). LLM.int8(): 8-bit matrix multiplication for transformers at scale. arXiv preprint arXiv:2208.07339.

Dettmers, T. and Zettlemoyer, L. (2022). The case for 4-bit precision: k-bit inference scaling laws. arXiv preprint arXiv:2212.09720.

Dolan, W. B. and Brockett, C. (2005). Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005).

Dong, Z., Yao, Z., Gholami, A., Mahoney, M. W., and Keutzer, K. (2019). HAWQ: Hessian aware quantization of neural networks with mixed-precision. In Proceedings of the IEEE International Conference on Computer Vision, pages 293–302.

Fan, A., Grave, E., and Joulin, A. (2019). Reducing transformer depth on demand with structured dropout. arXiv preprint arXiv:1909.11556.

Gholami, A., Kim, S., Dong, Z., Yao, Z., Mahoney, M. W., and Keutzer, K. (2021). A survey of quantization methods for efficient neural network inference. arXiv preprint arXiv:2103.13630.

Gordon, M. A., Duh, K., and Andrews, N. (2020). Compressing BERT: Studying the effects of weight pruning on transfer learning. arXiv preprint arXiv:2002.08307.

Han, S., Mao, H., and Dally, W. J. (2016). Deep compression: Compressing deep neural networks with pruning, trained quantization and Huffman coding. International Conference on Learning Representations.

Han, S., Pool, J., Tran, J., and Dally, W. (2015). Learning both weights and connections for efficient neural network. In Advances in Neural Information Processing Systems, pages 1135–1143.

Han, T., Zhang, T., Li, D., Liu, G., Tian, L., Xie, D., and Shan, Y. S. (2020). Convolutional neural network with INT4 optimization on Xilinx devices. Xilinx White Paper, WP521.

Hermann, K. M., Kocisky, T., Grefenstette, E., Espeholt, L., Kay, W., Suleyman, M., and Blunsom, P. (2015). Teaching machines to read and comprehend. arXiv preprint arXiv:1506.03340.

Hinton, G., Vinyals, O., and Dean, J. (2014). Distilling the knowledge in a neural network. Workshop paper in NIPS.

Holmes, C., Zhang, M., He, Y.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Wu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Compressing pre-trained transformers via low-bit nxm sparsity for natural language understanding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:2206.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='15014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Hu, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wallis, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Allen-Zhu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Li, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Chen, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Lora: Low-rank adaptation of large language models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In International Conference on Learning Representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Iyer, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Dandekar, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Csernai, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' First quora dataset release: Question pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='(2017).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' URL https://data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' quora.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' com/First-Quora-Dataset-Release-Question-Pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Jiao, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Yin, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Shang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Jiang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Chen, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Li, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Liu, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Tinybert: Distilling bert for natural language understanding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1909.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='10351.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Kim, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Gholami, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Yao, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Mahoney, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Keutzer, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' I-bert: Integer-only bert quantization.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In International conference on machine learning, pages 5506–5518.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' PMLR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Lagunas, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Charlaix, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Sanh, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Rush, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Block pruning for faster transformers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 10619–10629.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Lambda (2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' GPU workstation for deep learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' https://lambdalabs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='com/gpu-workstations/vector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Lan, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Chen, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Goodman, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Gimpel, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Sharma, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Soricut, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Albert: A lite bert for self-supervised learning of language representations.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1909.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='11942.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' LeCun, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Denker, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Solla, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (1990).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Optimal brain damage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In Advances in neural information processing systems, pages 598–605.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Lewis, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Liu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Goyal, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Ghazvininejad, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Mohamed, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Levy, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Stoyanov, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Zettlemoyer, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871–7880.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Li, F.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Zhang, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Liu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2016a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Ternary weight networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1605.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='04711.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Li, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Kadav, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Durdanovic, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Samet, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Graf, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2016b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Pruning filters for efficient convnets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1608.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='08710.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Li, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Tan, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Nallapati, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Bhatia, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Arnold, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Xiang, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Roth, D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Dq-bart: Efficient sequence-to-sequence model via joint distillation and quantization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 203–211.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Liu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Han, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Zhang, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Ma, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Gao, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Post-training quantization for vision transformer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Advances in Neural Information Processing Systems, 34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' 13 Mao, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Han, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Pool, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Li, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Liu, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Dally, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Exploring the regularity of sparse structure in convolutional neural networks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Workshop paper in CVPR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Mao, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wu, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Zhang, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Yang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Zhang, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Tong, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Bai, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Ladabert: Lightweight adaptation of bert through hybrid model compression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:2004.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='04124.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Marcinkiewicz, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (1994).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Building a large annotated corpus of english: The penn treebank.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Using Large Corpora, page 273.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Merity, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Xiong, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Bradbury, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Socher, R.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Pointer sentinel mixture models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In International Conference on Learning Representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Michel, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Levy, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Neubig, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Are sixteen heads really better than one?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1905.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='10650.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Micikevicius, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Narang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Alben, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Diamos, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Elsen, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Garcia, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Ginsburg, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Houston, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Kuchaiev, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Venkatesh, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Mixed precision training.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In International Conference on Learning Representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Mishra, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Latorre, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Pool, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Stosic, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Stosic, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Venkatesh, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Yu, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Micikevicius, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Accelerating sparse deep neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:2104.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='08378.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Narayan, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Martins, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Sordoni, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Bachman, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Courville, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Bengio, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Don’t give me the details, just the summary!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' : topic-aware convolutional neural networks for extreme summarization.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3706–3716.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' NVIDIA (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' CUTLASS: Fast Linear Algebra in CUDA C++.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' https://developer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='nvidia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='com/blog/ cutlass-linear-algebra-cuda/.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' NVIDIA (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Employing CUDA Graphs in a Dynamic Environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' https://developer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='nvidia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='com/b log/employing-cuda-graphs-in-a-dynamic-environment/.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' NVIDIA (2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' FasterTransformer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='com/NVIDIA/FasterTransformer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Radford, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Child, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Luan, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Amodei, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Sutskever, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2019).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Language models are unsupervised multitask learners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Raganato, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Scherrer, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Tiedemann, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Fixed encoder self-attention patterns in transformer- based machine translation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:2002.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='10260.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Rajpurkar, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Zhang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Lopyrev, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Liang, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' SQuAD: 100,000+ questions for machine comprehension of text.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1606.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='05250.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Sanh, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Debut, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Chaumond, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Wolf, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1910.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='01108.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Sanh, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wolf, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Rush, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Movement pruning: Adaptive sparsity by fine-tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Advances in Neural Information Processing Systems, 33:20378–20389.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Shen, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Dong, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Ye, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Ma, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Yao, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Gholami, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Mahoney, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Keutzer, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Q-BERT: Hessian based ultra low precision quantization of bert.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In AAAI, pages 8815–8821.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Socher, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Perelygin, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wu, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Chuang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Manning, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Ng, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Potts, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Recursive deep models for semantic compositionality over a sentiment treebank.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' In Proceedings of the 2013 conference on empirical methods in natural language processing, pages 1631–1642.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Sun, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Cheng, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Gan, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', and Liu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Patient knowledge distillation for bert model compression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' arXiv preprint arXiv:1908.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='09355.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' 14 Sun, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Wang, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Chen, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Ni, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', Agrawal, A.' 
, Cui, X., Venkataramani, S., El Maghraoui, K., Srinivasan, V. V., and Gopalakrishnan, K. (2020a). Ultra-low precision 4-bit training of deep neural networks. Advances in Neural Information Processing Systems, 33:1796–1807.
Sun, Z., Yu, H., Song, X., Liu, R., Yang, Y., and Zhou, D. (2020b). MobileBERT: a compact task-agnostic BERT for resource-limited devices. arXiv preprint arXiv:2004.02984.
Tang, H., Zhang, X., Liu, K., Zhu, J., and Kang, Z. (2022). MKQ-BERT: Quantized BERT with 4-bits weights and activations. arXiv preprint arXiv:2203.13483.
Tenney, I., Das, D., and Pavlick, E. (2019). BERT rediscovers the classical NLP pipeline. arXiv preprint arXiv:1905.05950.
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, Ł., and Polosukhin, I. (2017). Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998–6008.
Wang, W., Wei, F., Dong, L., Bao, H., Yang, N., and Zhou, M. (2020). MiniLM: Deep self-attention distillation for task-agnostic compression of pre-trained transformers. arXiv preprint arXiv:2002.10957.
Warstadt, A., Singh, A., and Bowman, S. R. (2018). Neural network acceptability judgments. arXiv preprint arXiv:1805.12471.
Williams, A., Nangia, N., and Bowman, S. R. (2017). A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426.
Wu, X., Yao, Z., Zhang, M., Li, C., and He, Y. (2022). Extreme compression for pre-trained transformers made simple and efficient. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.
Xiao, G., Lin, J., Seznec, M., Demouth, J., and Han, S. (2022). SmoothQuant: Accurate and efficient post-training quantization for large language models. arXiv preprint arXiv:2211.10438.
Xiong, R., Yang, Y., He, D., Zheng, K., Zheng, S., Xing, C., Zhang, H., Lan, Y., Wang, L., and Liu, T. (2020). On layer normalization in the Transformer architecture. In International Conference on Machine Learning, pages 10524–10533. PMLR.
Yao, Z., Aminabadi, R. Y., Zhang, M., Wu, X., Li, C., and He, Y. (2022). ZeroQuant: Efficient and affordable post-training quantization for large-scale transformers. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K., editors, Advances in Neural Information Processing Systems.
Yao, Z., Wu, X., Ma, L., Shen, S., Keutzer, K., Mahoney, M. W., and He, Y. (2021). LEAP: Learnable pruning for Transformer-based models. arXiv preprint arXiv:2105.14636.
A Related Work

Model compression, as a technique to reduce model size and computation cost, can be achieved by pruning, quantization, low-rank factorization and efficient architecture design (Han et al., 2015; Li et al., 2016b; Mao et al., 2017; LeCun et al., 1990; Michel et al., 2019; Fan et al., 2019; Gordon et al., 2020; Raganato et al., 2020; Dong et al., 2019; Yao et al., 2021; Mao et al., 2020; Hinton et al., 2014; Sanh et al., 2019; Sun et al., 2019; Jiao et al., 2019; Sun et al., 2020b; Wang et al., 2020; Lan et al., 2019; Dehghani et al., 2018; Liu et al., 2021; Hu et al., 2021; Micikevicius et al., 2018). Among this large body of literature, we mainly cover recent related work on INT4 quantization and system inference.

As described in the introduction, 8-bit quantization for LLMs, possibly mixed with other precisions, has been widely studied and proven effective in recent years (Yao et al., 2022; Xiao et al., 2022; Dettmers et al., 2022a,b; Li et al., 2022; Kim et al., 2021). However, pure INT4 quantization is a very aggressive technique that can significantly affect model accuracy, so it is not yet widely used in practice and is still emerging. To the best of our knowledge, the following are the most closely related works besides those mentioned in the introduction. Sun et al. (2020a) propose a 4-bit floating-point format with an adaptive gradient scaling technique and demonstrate its effectiveness on computer vision, speech and NLP tasks, along with solid hardware acceleration. Our study focuses on INT4 quantization instead of FP4, and the acceleration hardware is based on the Ampere architecture. Chung et al. (2020) propose a low-bit mixed-precision quantization strategy to represent Transformer models; however, their activations are kept in full precision.
Han et al. (2020) present a detailed implementation of INT4 optimization, but it is only applicable to convolutional networks, not Transformer models.

B Quantization Algorithms

Weight Quantization. To quantize a weight matrix $W \in \mathbb{R}^{d_{in} \times d_{out}}$ in a model, we apply the group-wise quantization method proposed in Shen et al. (2020); Yao et al. (2022). That is, we flatten the matrix into $\text{Vectorize}(W) \in \mathbb{R}^{d}$ (with $d = d_{in} d_{out}$), partition the weights into $g$ groups, and quantize each group separately. The finer the grouping (larger $g$), the smaller the approximation error between the weight matrix and its 4-bit counterpart. The largest group number we apply here is $d_{in}$, i.e., row-wise weight quantization, which gives the best GPU utilization.
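To make the group-wise scheme concrete, the following is a minimal NumPy sketch of group-wise INT4 fake-quantization of a weight matrix, with both the symmetric and asymmetric variants that are compared throughout the paper; the function name, the NumPy implementation, and the error printout are illustrative assumptions, not the paper's actual code.

```python
import numpy as np

def quantize_groupwise_int4(W, g, symmetric=True):
    """Group-wise INT4 fake-quantization of a weight matrix W (d_in x d_out).

    The matrix is flattened, split into g equal groups, and each group gets
    its own scale (and zero-point for the asymmetric variant). Returns the
    dequantized ("fake-quantized") weights so the error can be inspected.
    """
    w = W.reshape(-1)                        # Vectorize(W), length d = d_in * d_out
    assert w.size % g == 0, "d must be divisible by the number of groups"
    groups = w.reshape(g, -1)                # one row per quantization group

    if symmetric:
        # Symmetric: integer levels in [-8, 7], zero-point fixed at 0.
        scale = np.abs(groups).max(axis=1, keepdims=True) / 7.0
        q = np.clip(np.round(groups / scale), -8, 7)
        deq = q * scale
    else:
        # Asymmetric: integer levels in [0, 15], per-group zero-point covers the range.
        g_min = groups.min(axis=1, keepdims=True)
        g_max = groups.max(axis=1, keepdims=True)
        scale = (g_max - g_min) / 15.0
        zero_point = np.round(-g_min / scale)
        q = np.clip(np.round(groups / scale) + zero_point, 0, 15)
        deq = (q - zero_point) * scale

    return deq.reshape(W.shape)

# Example: finer groups (larger g) and the asymmetric variant reduce the error.
W = np.random.randn(768, 768).astype(np.float32)
for g in (1, 768):                           # g = d_in corresponds to row-wise quantization
    for sym in (True, False):
        err = np.abs(W - quantize_groupwise_int4(W, g, sym)).mean()
        print(f"g={g:4d} symmetric={sym}: mean abs error {err:.5f}")
```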
Activation Quantization. Different from the weight parameters, which are static during inference, the activations are dynamic. In order to achieve the best latency reduction (Gholami et al., 2021), static quantization methods calibrate the scaling factor $S$ on training data and fix $S$ during inference. However, this also limits the quantization representation of the activations, as discussed in Yao et al. (2022). Thus, we adopt a finer-grained token-wise dynamic quantization and use the min/max range of each token.
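As a rough illustration of this token-wise dynamic scheme (as opposed to a single static scale calibrated offline), the sketch below computes one asymmetric min/max range per token at runtime; the function name and tensor shapes are assumptions made for the example.

```python
import numpy as np

def quantize_activations_tokenwise_int4(x):
    """Token-wise dynamic INT4 fake-quantization of activations.

    x has shape (num_tokens, hidden_dim); each token (row) gets its own
    scale and zero-point computed from its min/max at runtime.
    """
    x_min = x.min(axis=1, keepdims=True)
    x_max = x.max(axis=1, keepdims=True)
    scale = (x_max - x_min) / 15.0             # INT4 -> 16 levels: 0..15
    scale = np.where(scale == 0, 1.0, scale)   # guard against constant tokens
    zero_point = np.round(-x_min / scale)
    q = np.clip(np.round(x / scale) + zero_point, 0, 15)
    return (q - zero_point) * scale            # dequantized activations

# Example: a batch of 4 tokens with a 768-dim hidden state.
x = np.random.randn(4, 768).astype(np.float32)
x_q = quantize_activations_tokenwise_int4(x)
print("mean abs quantization error:", np.abs(x - x_q).mean())
```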
C Additional Experimental Details and Results

C.1 Experimental Details for Section 2.2

All experiments are performed on V100 GPUs, and the training strategy is Quantization-aware Training (QAT) with Knowledge Distillation (KD). For BERT models, the maximum sequence length for MNLI/QQP is set to 128, with a batch size of 64 for base and 32 for large. Each independent experiment is run on a single GPU with a fixed random seed of 42. We record the validation performance every 1000 training iterations and report the best value. For BART-type models, we follow Li et al. (2022) closely, with slightly different hyper-parameters as shown in Table C.1. Each independent experiment is run on 2 GPUs for base models and 4 GPUs for large models. As for GPT2-type models, we use the pretrained Hugging Face models with a maximum length of 1024 and a batch size of 8 on 4 GPUs per independent run. See Table C.1 for the hyper-parameter search; we will open-source the code and the configurations.

Asymmetric and Symmetric Quantization. To better understand the difference between asymmetric and symmetric quantization, we plot in Figure C.1 the performance over the validation datasets during quantization-aware training. The orange curves always sit on top of the blue dashed lines, showing that asymmetric quantization is better than symmetric. Furthermore, Figure C.1 shows that (1) the gap between symmetric and asymmetric quantization becomes more obvious as the model size increases from base (the first row) to large/medium (the second row), which indicates the importance of asymmetric quantization for large models; (2) while the benefit of the asymmetric method (over the symmetric one) can become marginal from the beginning of the training to the end, that appears to be the case only for BERT and BART, not for GPT.

Table C.1: The hyper-parameters we tuned for the results in Table 1. An entry with a single choice means we only use the default value. For entries with multiple choices, we bold the one that gives the best performance. In the table, Att with ⋆ (Att⋆) denotes unnormalized attention scores, and Att denotes the normalized version (note that the default output of the Hugging Face library is a normalized version of the attention scores).

Models        | BERT                        | BART                                 | GPT
Size          | Base / Large                | Base / Large                         | Base / Medium
Dropout       | 0.1 (default)               | 0.1 (default)                        | {0, 0.05, 0.1}
Clip Values   | {[-5.0, 5.0], [-∞, +∞]}     | {[-1, 1], [-2.5, 2.5]} / [-2.5, 2.5] | {[-0.5, 0.5], [-1, 1], [-2.5, 2.5]}
Loss Terms    | Logit/Att⋆/Rep (default)    | ClsLoss/Logit/Att⋆/Rep               | {None, ClsLoss}+{Att, Att⋆}+Logit/Rep
Epoch         | {3, 9} / {3, 5}             | 20 / 8                               | {30, 60, 90} / {15, 30, 45}
Batch size    | 64 / 32                     | 16 / 8                               | 8 / 8
Learning Rate | {5e-5, 1e-4} / {2e-5, 5e-5} | {2e-5, 5e-5} / {2e-5, 5e-5}          | {5e-5, 1e-4, 5e-4} / {5e-5, 1e-4}

[Figure C.1 panels: BERT-Base and BERT-Large accuracy (average of MNLI-m and MNLI-mm) vs. iterations (x1000), best accuracy symmetric 84.31/84.48 vs. asymmetric 84.29/84.65 for base, symmetric 86.25/86.2 vs. asymmetric 86.49/86.28 for large; BART-Base and BART-Large Rouge Lsum on XSUM vs. epoch, best RLsum symmetric 33.69 vs. asymmetric 33.62 for base, symmetric 36.33 vs. asymmetric 36.79 for large; GPT2-Base and GPT2-Medium perplexity on Wikitext-2 vs. iterations (x500), best PPL symmetric 27.28 vs. asymmetric 25.99 for base, symmetric 19.51 vs. asymmetric 18.74 for medium.]

Figure C.1: The performance of W4A4 during quantization-aware training with KD over the validation dataset for BERT (left), BART (middle), and GPT (right) models, respectively, with metrics: Accuracy (Acc., higher is better), Rouge Lsum (RLsum, higher is better), and perplexity (PPL, lower is better).
Model sizes in the top row are smaller than those in the bottom row.

C.2 MKQ-BERT Results

Table C.2 shows the latency of a single BERT-base layer reported in Table 2 of MKQ-BERT (Tang et al., 2022), compared to FasterTransformer (NVIDIA, 2023). We can see that both the FP32 and INT8 results are off by more than an order of magnitude. Due to the lack of implementation details in the MKQ-BERT paper (no open-sourced code), we cannot further identify the issue.

Table C.2: End-to-end inference time (ms) for running one layer of the BERT-base model with different batch sizes and sequence lengths on NVIDIA T4 GPUs. Columns 2 to 4 are numbers taken from Tang et al. (2022). FasterTransformer (FT) requires the sequence length to be a multiple of 32, so the inputs in parentheses are used to run FasterTransformer.

Batch Size-Seq. Length | MKQ-fp32 | MKQ-int8 | MKQ-int4 | FT-fp32 | FT-int8
16-440 (16-448)        | 1.38     | 0.2131   | 0.1605   | 25.62   | 5.1
16-537 (16-544)        | 1.845    | 0.2457   | 0.1793   | 34.25   | 6.61
16-681 (16-704)        | 2.69     | 0.2609   | 0.1965   | 46.54   | 9.39

C.3 Sensitivity of Activation Quantization for GPT2

In this section, we study how sensitive the model quality is to activation quantization. We relax the INT4 activations to INT8 or back to FP32 and follow the same QAT recipe as W4A4. We plot the average perplexity with respect to the training iteration in Figure 5 (left), as well as the position perplexity at training iteration 34000 in Figure 5 (right). We see that, even with QAT and KD, W4A8 (green) is better than W4A4 but still far from the teacher's quality. Only 4-bit weight quantization (W4 only, red curve) can almost match the teacher's quality (blue), which indicates that autoregressive generation with GPT models is highly sensitive to activation quantization. It is interesting to notice that the red curve in Figure 5 (left) already flattens at the beginning of training, which suggests that a PTQ method could be possible for weight-only quantization; this aligns with the observation in Dettmers and Zettlemoyer (2022).
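To make the W4 / W4A8 / W4A4 settings studied here concrete, the following is a minimal sketch of a fake-quantized linear layer with a configurable activation bit-width (row-wise weight quantization, token-wise activation quantization, both using a simple asymmetric min/max quantizer); the function and argument names are ours for illustration and do not reproduce the paper's QAT pipeline.

```python
import numpy as np

def fake_quant(x, n_bits):
    """Asymmetric min/max fake-quantization to n_bits, one range per row."""
    x_min = x.min(axis=1, keepdims=True)
    x_max = x.max(axis=1, keepdims=True)
    scale = np.maximum((x_max - x_min) / (2 ** n_bits - 1), 1e-8)
    zero_point = np.round(-x_min / scale)
    q = np.clip(np.round(x / scale) + zero_point, 0, 2 ** n_bits - 1)
    return (q - zero_point) * scale

def quantized_linear(x, W, weight_bits=4, act_bits=None):
    """Linear layer with quantized weights and optionally quantized activations.

    act_bits=None keeps activations in full precision (the "W4 only" setting),
    act_bits=8 corresponds to W4A8, and act_bits=4 to W4A4.
    """
    W_q = fake_quant(W, weight_bits)                          # row-wise weights
    x_q = x if act_bits is None else fake_quant(x, act_bits)  # token-wise activations
    return x_q @ W_q.T

# Compare the output error of the three settings against the FP32 layer.
rng = np.random.default_rng(0)
x, W = rng.standard_normal((16, 768)), rng.standard_normal((768, 768))
ref = x @ W.T
for act_bits in (None, 8, 4):
    err = np.abs(ref - quantized_linear(x, W, 4, act_bits)).mean()
    name = "W4 only" if act_bits is None else f"W4A{act_bits}"
    print(f"{name:7s}: mean abs output error {err:.4f}")
```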
C.4 More Experiments on Composing Pruning and INT4

Besides the MNLI/QQP tasks mentioned in Section 5.1, we include the following GLUE tasks for W4A4 quantization: MRPC (Dolan and Brockett, 2005), STS-B (Cer et al., 2017), SST-2 (Socher et al., 2013), QNLI (Rajpurkar et al., 2016), QQP (Iyer et al., 2017), MNLI (Williams et al., 2017), CoLA (Warstadt et al., 2018), and RTE (Dagan et al., 2013). The maximum sequence length is set to 64 for CoLA/SST-2 and 128 for the remaining sequence-pair tasks.
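Table C.3 below compares static and iterative pruning on top of the W4A4 models at 50% semi-structured Pair-(2:4) sparsity. As a reminder of what the 2:4 pattern means, here is a minimal magnitude-based sketch; the helper name is hypothetical, and the paper's actual static (weight-based) and iterative (gradient-based) criteria are not reproduced.

```python
import numpy as np

def prune_2_4(W):
    """Apply 2:4 semi-structured sparsity: in every group of 4 consecutive
    weights, keep the 2 with the largest magnitude and zero out the other 2,
    giving exactly 50% sparsity in a hardware-friendly pattern."""
    flat = W.reshape(-1, 4)                      # groups of 4 along the last dim
    drop = np.argsort(np.abs(flat), axis=1)[:, :2]  # 2 smallest-magnitude entries
    mask = np.ones_like(flat)
    np.put_along_axis(mask, drop, 0.0, axis=1)
    return (flat * mask).reshape(W.shape)

W = np.random.randn(8, 16)
W_sparse = prune_2_4(W)
print("sparsity:", (W_sparse == 0).mean())       # 0.5 by construction
```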
Table C.3: Comparison between static and iterative pruning methods on top of W4A4 models. Here the 50% sparsity is semi-structured pruning with Pair-(2:4). We applied data augmentation for the smaller datasets and used the long training epochs (Budget-C) shown in Table C.4. The learning rate is fixed at 1e-4. Note that the results for MNLI and QQP differ from Table 2 due to the teacher models.

Model               | Pruning Method       | CoLA (Mcc) | MNLI-m/-mm (Acc/Acc) | MRPC (F1/Acc) | QNLI (Acc) | QQP (F1/Acc) | RTE (Acc) | SST-2 (Acc) | STS-B (Pear/Spea) | Avg. all
BERTbase (teacher)  |                      | 59.7       | 84.9/85.6            | 90.6/86.3     | 92.1       | 88.6/91.5    | 72.2      | 93.2        | 90.1/89.6         | 83.95
W4A4 + 50% sparsity | Static (weight)      | 57.4       | 84.4/84.8            | 90.8/86.5     | 91.4       | 88.3/91.4    | 73.3      | 93.3        | 89.5/89.2         | 83.56
W4A4 + 50% sparsity | Iterative (gradient) | 58.1       | 84.3/84.9            | 90.9/86.8     | 91.4       | 88.4/91.4    | 72.9      | 93.3        | 89.4/89.0         | 83.61

Table C.4: Training budgets for the GLUE tasks.

Dataset         | Data Aug. | Training epochs (Budget-A / Budget-B / Budget-C)
QQP/MNLI        | no        | 3 / 9 / 18 or 36
QNLI            | yes       | 1 / 3 / 6 or 9
SST-2/STS-B/RTE | yes       | 1 / 3 / 12
CoLA/MRPC       | yes       | 1 / 3 / 12 or 18

Table C.5: Results with data augmentation.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' w/o rate Mcc Acc/Acc F1/Acc Acc F1/Acc Acc Acc Pear/Spea all CoLA BERTlarge (fp32) 63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6/88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='05 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='00 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3/90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='1 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='81 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='50 Budget-A 1e-05 62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='1 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2/87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 69.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7 93.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3/90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='1 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='20 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='96 5e-05 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0/87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7/90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='9 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7/90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='72 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='24 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0001 61.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='9/87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 91.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='9 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 75.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6/90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='80 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='69 Best (above) 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2/87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 75.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7/90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='23 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='81 BERTbase (fp32) 59.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='9/85.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6/86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='1 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='1/89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='95 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='98 Budget-A 2e-05 59.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='1 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2/87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 73.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8/89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='97 87.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='08 5e-05 60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 71.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0/89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='74 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='65 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0001 59.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='9 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 93.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='9/89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='90 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='90 Best (above) 60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='8/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2/87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4/91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='4 73.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='7 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0/89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='6 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='24 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='21 2 4 6 8 10 Iteration (x3400) 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 30.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 Perplexity Average ppl w.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='t to iterations Teacher (fp32) w4a4 w4a8 w4 only 0 25 50 75 100 125 150 175 200 Position 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='0 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='5 Perplexity Positioin-wise ppl at iteration 34000 0 2 4 6 8 10 0 100 200 300 Zoom Out Y-axis GPT2: Generation (wikitext2) 0 5 10 15 20 25 30 35 Iterations (x1000) 81 82 83 84 85 86 87 Val.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Acc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' (MNLI-m) Fine-tuning MNLI with 3 epochs: Method | Best Acc (MNLI-m/-mm) : Dynamic Mov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' | 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='83/84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='07 Static 1 | 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='31/84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='79 0 50 100 150 200 Iterations (x1000) Fine-tuning MNLI with 18 epochs: Method | Best Acc (MNLI-m/-mm) : Dynamic Mov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' | 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='45/84.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='55 Static 1 | 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='56/85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='04 Figure C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2: The top two figures are for GPT2 quantization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' The bottom two figures are for the comparison between movement and ℓ1 pruning with QAT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Static pruning or iterative movement pruning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Now that we decide to apply prune and then quantize (P=>Q) algorithm, one may wonder if the ℓ1 pruning method used above is the best pruning algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Recent advancement on pruning methods suggests that Movement Pruning with iterative pruning threshold (Sanh et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Lagunas et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=', 2021) has been proven to be effective in transfer learning for languages models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' That is, during the iterative pruning, the mask will be updated and determined by the gradients of the weight instead of the value of the weight.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' Previous works only work on pruning only, here we investigate on whether it works well with quantized models and layerwise KD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' The results are shown in Figure C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='2 or Table C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/MNFLT4oBgHgl3EQfMy8Y/content/2301.12017v1.pdf'} +page_content=' We see that ℓ1 is consistently better under long or short training epochs, although the gap between the two methods can be reduced with sufficient iterations.' 
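To make the two pruning criteria concrete, here is a minimal NumPy sketch; it is an illustration, not the paper's code, and the sparsity level, score-learning rate, and per-tensor granularity are assumptions. Static ℓ1/magnitude pruning fixes the mask once from the weight magnitudes, while movement pruning accumulates a gradient-based importance score of the form -w * dL/dw and re-derives the mask as training proceeds.

import numpy as np

def magnitude_mask(weight, sparsity):
    # Static l1/magnitude pruning: prune the smallest-|w| fraction once.
    k = int(round(sparsity * weight.size))
    if k == 0:
        return np.ones(weight.shape, dtype=bool)
    threshold = np.partition(np.abs(weight).ravel(), k - 1)[k - 1]
    return np.abs(weight) > threshold

def update_movement_scores(scores, weight, grad, lr=1e-2):
    # Movement pruning: the importance score follows -w * dL/dw, so weights
    # that the gradient pushes towards zero lose importance over time.
    return scores - lr * grad * weight

def movement_mask(scores, sparsity):
    # Iterative pruning: re-derive the mask from the accumulated scores
    # at every pruning step instead of fixing it up front.
    k = int(round(sparsity * scores.size))
    if k == 0:
        return np.ones(scores.shape, dtype=bool)
    threshold = np.partition(scores.ravel(), k - 1)[k - 1]
    return scores > threshold

In a training loop the static mask would be computed once and applied to every update, whereas the movement scores and mask would be refreshed at each step, which is what makes the iterative variant more expensive.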
As iterative pruning based on the gradient of the weight matrices requires the masks to be updated dynamically, its computational complexity/time is much higher than that of static masks under the same number of training iterations. Thus, this finding indicates that static pruning is sufficient when applying KD for QAT.
diff --git a/NtE3T4oBgHgl3EQfwwva/content/2301.04706v1.pdf b/NtE3T4oBgHgl3EQfwwva/content/2301.04706v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..d048abec0f7aebe93032bd50b7fd91c4bd06356c
--- /dev/null
+++ b/NtE3T4oBgHgl3EQfwwva/content/2301.04706v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c847b52f690662f875d189322fd2150580e13b0687dbb4656e6d0bb85815d943
+size 3887555
diff --git a/NtE3T4oBgHgl3EQfwwva/vector_store/index.faiss b/NtE3T4oBgHgl3EQfwwva/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..7f5e6b59d2d2546ca40c6d2a2908112b8a35fa16
--- /dev/null
+++ b/NtE3T4oBgHgl3EQfwwva/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a31790be372aec2c8ce024f72b9440d5e499e21f1bce6a615f8f225f7d2ec49
+size 6684717
diff --git a/OdAzT4oBgHgl3EQflP0i/content/tmp_files/2301.01543v1.pdf.txt b/OdAzT4oBgHgl3EQflP0i/content/tmp_files/2301.01543v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..354a01a68fbced869ba32c9ff1d60e9ed718ec6e
--- /dev/null
+++ b/OdAzT4oBgHgl3EQflP0i/content/tmp_files/2301.01543v1.pdf.txt
@@ -0,0 +1,418 @@
arXiv:2301.01543v1 [stat.ME] 4 Jan 2023

A note on the variance in principal component regression
Bert van der Veen
Department of Mathematical Sciences, Norwegian University of Science and Technology, Trondheim, Norway

Summary
Principal component regression is a popular method to use when the predictor matrix in a regression is of reduced column rank. It has been proposed to stabilize computation under such conditions, and to improve prediction accuracy by reducing the variance of the least squares estimator for the regression slopes. However, it presents the added difficulty of having to determine which principal components to include in the regression. I provide arguments against selecting the principal components by the magnitude of their associated eigenvalues, by examining the estimator for the residual variance, and by examining the contribution of the residual variance to the variance of the estimator for the regression slopes. I show that when a principal component that is important in explaining the response variable is omitted from the regression, the residual variance is overestimated, so that the variance of the estimator for the regression slopes can be higher than that of the ordinary least squares estimator.

keywords: PC-regression, shrinkage, ordination, dimension reduction, multicollinearity.
Introduction
When there are many predictors in linear regression, or when predictors are collinear, many researchers instead replace the predictors with fewer principal components (PCs, Pearson 1901) derived from the predictor matrix. PCs have the benefit of being orthogonal, so that including PCs tends to stabilize computation (Hotelling 1957; Jolliffe 1982).
Jolliffe (1982) noted that the original idea behind PC-regression was to include all PCs, while authors usually omit PCs with small eigenvalues. Consequently, many authors have cautioned against the potential consequences of PC-regression (Artigue and Smith 2019; Hadi and Ling 1998; Mardia, Kent, and Bibby 1980). However, note that there are better ways to determine which PCs should be included in the regression, e.g., with cross-validation (Hastie, Friedman, and Tibshirani 2001) or significance testing (Mardia, Kent, and Bibby 1980), which instead focus on the variation PCs explain in the response.
However, the arguments made against omitting PCs with small eigenvalues have mostly been qualitative rather than quantitative (Jolliffe 1982; Artigue and Smith 2019). Hadi and Ling (1998) note that the sum of squared errors obtained by PCR will never be lower than that obtained by ordinary least squares (OLS). Næs and Martens (1988) discuss the variance of the PC-regression estimator but neglect to discuss how the PC-regression estimator for the residual variance compares to the OLS estimator.
To improve on this, I here provide various expressions for the variance of the PC-regression estimator of the regression slopes, and for the PC-regression estimator of the residual variance, to indicate that 1) the PC-regression estimator for the residual variance is biased upwards, and that consequently 2) the variance of the PC-regression estimators for the slope parameters is usually, but not always, smaller than that of the OLS estimator.

PC-regression
When including d PCs in a regression instead of the r measured predictors, the matrix of predictors X with i = 1, ..., n rows and p = 1, ..., K columns is first subjected to a singular value decomposition (SVD) to retrieve its left U = X V \Sigma^{-1} and right V = X^\top U \Sigma^{-1} singular vectors, where \Sigma is a diagonal matrix of singular values. In applied sciences, the left singular vectors take the interpretation of gradients or latent variables, such as temperature, climate, or genetic similarity in ecology and evolution, or types of behavior such as aggressiveness, kindness, or passiveness in social science. As such, the columns of U represent a compound of effects vaguely related to the original identities of the predictor variables.
Let X̃ = X - U_k \Sigma_k V_k^\top denote the predictor variables "observed" with error, or equivalently, the predictor matrix as reconstructed from its SVD with the first d left singular vectors. With \beta_d and \beta_k the (slope) parameters due to the first d and last k left singular vectors of X, and where usually d \leq K, we have:

y = \tilde{X}\beta + \epsilon
  = (X - U_k \Sigma_k V_k^\top)(\beta_d + \beta_k) + \epsilon
  = X\beta_d + \epsilon_{PC,d},   where \epsilon_{PC,d} = X\beta_k - U_d \Sigma_d V_d^\top \beta_k - U_k \Sigma_k V_k^\top \beta_d + \epsilon,    (1)

where \epsilon is the error from a regression with the predictor variables. Here, \epsilon_{PC,d} is the error from a regression with d left singular vectors.
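As a concrete illustration of this setup, the NumPy sketch below computes a PC-regression from the SVD and maps the estimates back to the predictor scale via \beta_d = V_d \Sigma_d^{-1} \beta_{PC,d}. It is a sketch only; the simulated data, the choice of d, and the variable names are mine and not part of the note.

import numpy as np

def pc_regression(X, y, d):
    # Regress y on the first d left singular vectors of X and map the
    # coefficients back to the scale of the original predictors.
    U, s, Vt = np.linalg.svd(X, full_matrices=False)   # X = U diag(s) V^T
    Ud, sd, Vd = U[:, :d], s[:d], Vt[:d, :].T
    beta_pc_d = Ud.T @ y            # slopes for the d (orthonormal) left singular vectors
    beta_d = Vd @ (beta_pc_d / sd)  # beta_d = V_d Sigma_d^{-1} beta_PC,d
    residuals = y - Ud @ beta_pc_d
    return beta_d, residuals

# Example: 100 observations, 5 correlated predictors, keep d = 3 components.
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5)) @ rng.normal(size=(5, 5))
beta_true = np.array([1.0, -2.0, 0.5, 0.0, 0.0])
y = X @ beta_true + rng.normal(scale=0.5, size=100)
beta_d, resid = pc_regression(X, y, d=3)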
Let β̂_d be the K x 1 vector of parameter estimates from a regression of the response on the first d left singular vectors, i.e., the estimators that maximize the likelihood of the model:

y = U_d \beta_{PC,d} + \epsilon_{PC,d},   where \beta_{PC,d} = \Sigma_d V_d^\top \beta_d
  = X\beta_d + \epsilon_{PC,d},           where \beta_d = V_d \Sigma_d^{-1} \beta_{PC,d},    (2)

where \beta_{PC,d} is a d x 1 vector of slope parameters for the first d left singular vectors, so that we have a similar estimator for \beta_k. Consequently, the terms U_d \Sigma_d V_d^\top \beta_k and U_k \Sigma_k V_k^\top \beta_d from equation (1) are zero due to orthogonality of the singular vectors, and we have that \beta = V_d \Sigma_d^{-1} \beta_{PC,d} + V_k \Sigma_k^{-1} \beta_{PC,k}, or equivalently \beta = \beta_d + \beta_k, and cov(β̂_k, β̂_d) = 0. We thus see that the discrepancy of the PC-regression error relative to the error from a regression fitted by OLS can be estimated as H_{PC,k} y, so that we have:

y = X\beta + \epsilon_{PC,d},   where \epsilon_{PC,d} = \epsilon + H_{PC,k} y,    (3)

where H_{PC,k} = X V_k V_k^\top (X^\top X)^{-1} X^\top = U_k U_k^\top is the hat matrix for a regression of y on the last k left singular vectors, with errors \epsilon_{PC,k} (without additional intercept). As such, the error in PC-regression is given as the sum of the error from a regression with the predictor variables and that of a PC-regression with the k remaining left singular vectors.
The two terms that make up the residual of PC-regression in the last line of equation (3) are independent, so that the sum of squared errors is:

\epsilon_{PC,d}^\top \epsilon_{PC,d} = \epsilon^\top \epsilon + y^\top H_{PC,k} y
                                     = y^\top (I - H) y + y^\top H_{PC,k} y
                                     = y^\top (I - H_{PC,d}) y.    (4)

Consequently, the expected value of the sum of squared errors for PC-regression is:

E(\epsilon_{PC,d}^\top \epsilon_{PC,d}) = E(\epsilon^\top \epsilon) + E(y^\top H_{PC,k} y)
                                        = \sigma^2 (n - r) + (\beta - \beta_d)^\top X^\top X (\beta - \beta_d),    (5)

so that the estimator for the residual variance in a PC-regression is:

\hat{\sigma}^2_{PC,d} = \frac{\sigma^2 (n - r)}{n - d} + \frac{(\beta - \beta_d)^\top X^\top X (\beta - \beta_d)}{n - d},    (6)

and an unbiased estimator for the residual variance of linear regression \sigma^2, based on the PC-regression estimator, is:

\hat{\sigma}^2 = \frac{\hat{\sigma}^2_{PC,d} (n - d) - y^\top H_{PC,k} y}{n - r}.    (7)

From equation (6) it is possible to conclude that σ̂²_{PC,d} is generally an upwards biased estimator for σ² when d < K, and similarly for σ²_{PC,k}. More specifically, the bias for the PC-regression estimator is:

E(\hat{\sigma}^2_{PC,d} - \sigma^2) = \frac{\sigma^2 (n - r)}{n - d} + \frac{E(y^\top H_{PC,k} y)}{n - d} - \sigma^2
                                    = \left(\frac{n - r}{n - d} - 1\right) \sigma^2 + \frac{(\beta - \beta_d)^\top X^\top X (\beta - \beta_d)}{n - d},    (8)

which shows that the bias is likely to increase with the number of omitted left singular vectors k, and that both terms converge to zero as d \to r, i.e., when the PC-regression estimator coincides with the OLS estimator. However, note that the bias induced depends on the importance of each omitted left singular vector in explaining the response variable: if none of the omitted left singular vectors is relevant in explaining the response variable, the residual variance is instead underestimated rather than overestimated by PC-regression. Further, as n \to \infty the second term in equation (8) dominates the bias of the residual variance, but if the omitted left singular vectors hold little importance and there are few included left singular vectors, the bias in the residual variance estimate by PC-regression will be negligible.
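The direction of the bias in equations (6)-(8) can be checked by simulation. The sketch below (sample size, coefficients, and noise level are arbitrary choices of mine, not taken from the note) drops the last left singular vector and compares the average residual-variance estimate against the OLS estimate, once when that component is irrelevant to the response and once when it is the only component that matters; in the first case the two estimates are close, in the second the PC-regression estimate is clearly inflated.

import numpy as np

rng = np.random.default_rng(1)
n, r, sigma = 200, 4, 1.0
X = rng.normal(size=(n, r)) @ np.diag([3.0, 2.0, 1.0, 0.5])  # predictors on unequal scales
U, s, Vt = np.linalg.svd(X, full_matrices=False)

def sigma2_hat(Z, y):
    # Residual variance estimate for an OLS fit of y on the columns of Z.
    beta_hat = np.linalg.lstsq(Z, y, rcond=None)[0]
    resid = y - Z @ beta_hat
    return resid @ resid / (len(y) - Z.shape[1])

scenarios = {
    "omitted component irrelevant": 2.0 * Vt[0],   # response driven by the first component only
    "omitted component important": 2.0 * Vt[-1],   # response driven by the last (smallest) component
}
for label, beta in scenarios.items():
    ols, pc = [], []
    for _ in range(500):
        y = X @ beta + rng.normal(scale=sigma, size=n)
        ols.append(sigma2_hat(X, y))            # all predictors
        pc.append(sigma2_hat(U[:, :r - 1], y))  # PC-regression without the last left singular vector
    print(label, "mean OLS:", round(np.mean(ols), 3), "mean PC:", round(np.mean(pc), 3))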
The residual variance of a PC-regression can alternatively be formulated as a function of the residual variance of a regression for each separate dimension, due to the orthogonality of the singular vectors. Specifically, we see that y^\top H y = \sum_{q=1}^{k} y^\top H_{PC,q} y, where H_{PC,q} is the hat matrix of a regression that only includes the qth left singular vector, so that for the residual variance we have:

\hat{\sigma}^2_{PC,d} = \frac{\sigma^2 (n - r) + y^\top y - \sigma^2_{PC,k} (n - r + d)}{n - d}
                      = \frac{\sigma^2 (n - r) + k \, y^\top y - (n - 1) \sum_{q=1}^{k} \sigma^2_{PC,q}}{n - d}
                      = \frac{(n - 1) \sum_{q=1}^{d} \sigma^2_{PC,q} - (d - 1) \, y^\top y}{n - d},    (9)

where σ²_{PC,q} is the residual variance of the regression on the qth left singular vector.
So far we had:

\hat{\beta}_d \sim N(V_d \Sigma_d^{-1} \beta_{PC,d}, \; V_d \Sigma_d^{-1} \Sigma_{PC,d} \Sigma_d^{-1} V_d^\top),    (10)

or equivalently:

\hat{\beta}_d \sim N\{(X^\top X)^{-1} V V^\top y, \; V_d \Sigma_d^{-1} \Sigma_{PC,d} \Sigma_d^{-1} V_d^\top\}.    (11)

However, with the result from above, we are able to write the estimated covariance matrix of β̂_d in terms of the covariance matrix of a regression with the predictors, and of a regression with the remaining k left singular vectors. This will facilitate a better understanding of changes in the variance of the PC-regression estimator for different numbers of left singular vectors in a PC-regression. Let again X̃ = X - U_k \Sigma_k V_k^\top denote the predictors observed with error as included in a PC-regression. Then,

var(\hat{\beta}_d) = (\tilde{X}^\top \tilde{X})^{-1} \sigma^2_{PC,d} = var(\hat{\beta}) \, V_d V_d^\top \, \frac{\sigma^2_{PC,d}}{\sigma^2},    (12)

where we note that the diagonal of V_d V_d^⊤ monotonically increases in the number of left singular vectors d in the regression, and that σ²_{PC,d}/σ² ≥ 1, so that it represents the inflation in the residual variance of the PC-regression due to omission of the last k left singular vectors (see appendix S1 for an expanded proof).
This further demonstrates that var(β̂) V_d V_d^⊤ ∝ var(β̂_d), with the benefit that V_d V_d^⊤ can be computed without fitting a regression, but only with the matrix of predictors and its SVD. The diagonal entries of the outer product of the first d right singular vectors of X are thus equal to the proportional decrease, relative to the variance of the OLS estimator, obtained by omitting k left singular vectors. This result emphasizes that the variance of the PC-regression estimator decreases monotonically in the number of left singular vectors used to approximate the OLS estimator. However, since σ̂²_{PC,d} increases in the number of omitted left singular vectors k, the variance of the PC-regression estimator need not be lower than that of the OLS estimator, unless some eigenvalues are zero.
Finally, the variance of the PC-regression estimator can also be written as a difference between the variance of the estimator of a regression with the predictors and that of the estimator for a regression on the remaining k left singular vectors:

var(\hat{\beta}_d) = var(\hat{\beta}) (I_K - V_k V_k^\top) \frac{\sigma^2_{PC,d}}{\sigma^2}
                   = \left\{ var(\hat{\beta}) - \frac{\sigma^2}{\sigma^2_{PC,k}} var(\hat{\beta}_k) \right\} \frac{\sigma^2_{PC,d}}{\sigma^2}.    (13)

Consequently, we have var(β̂) = var(β̂_d) σ²/σ²_{PC,d} + var(β̂_k) σ²/σ²_{PC,k} (see appendix S2 for an expanded proof). Here, σ²/σ²_{PC,k} ≤ 1 increases in k, converges to one as k → r, and is zero at d = r. Consequently, the PC-regression estimator studied here has sampling distribution β̂_d ∼ N{β - β_k, var(β̂) V_d V_d^⊤ σ²_{PC,d}/σ²}, or alternatively, β̂_d ∼ N{(X^⊤X)^{-1} V V^⊤ y, var(β̂) σ²_{PC,d}/σ² - var(β̂_k) σ²_{PC,d}/σ²_{PC,k}}.
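The identity in equation (12) is easy to verify numerically. In the sketch below (dimensions and coefficients are again arbitrary choices of mine), the left-hand side uses the generalized inverse V_d Σ_d^{-2} V_d^⊤ for (X̃^⊤X̃)^{-1}, since X̃^⊤X̃ has rank d; the assumption is that this is the intended reading of the inverse in (12).

import numpy as np

rng = np.random.default_rng(2)
n, r, d = 200, 4, 2
X = rng.normal(size=(n, r))
y = X @ np.array([1.0, -1.0, 0.5, 0.0]) + rng.normal(size=n)

U, s, Vt = np.linalg.svd(X, full_matrices=False)
Vd = Vt[:d, :].T

# OLS quantities
XtX_inv = np.linalg.inv(X.T @ X)
beta_ols = XtX_inv @ X.T @ y
sigma2_ols = np.sum((y - X @ beta_ols) ** 2) / (n - r)
var_beta_ols = XtX_inv * sigma2_ols

# PC-regression on the first d left singular vectors
beta_pc = U[:, :d].T @ y
sigma2_pc = np.sum((y - U[:, :d] @ beta_pc) ** 2) / (n - d)

lhs = Vd @ np.diag(1.0 / s[:d] ** 2) @ Vd.T * sigma2_pc          # (X~^T X~)^+ sigma^2_PC,d
rhs = var_beta_ols @ (Vd @ Vd.T) * (sigma2_pc / sigma2_ols)      # var(beta_hat) V_d V_d^T sigma^2_PC,d / sigma^2
print(np.allclose(lhs, rhs))  # True, up to floating point error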
Concluding remarks
Similarly to Hadi and Ling (1998), I conclude that omitting dimensions as in PC-regression is likely to result in lack of fit. Various authors have pointed out that it is important to consider the relationship of PCs with the response variable, and that it is not the magnitude of the associated eigenvalue that should dictate whether a PC is included in a PC-regression (Hadi and Ling 1998; Jolliffe 1982; Artigue and Smith 2019). This is further supported by the various expressions in this article, and by equation (6) in particular. It is the relationship of PCs with the response variable that determines how serious an issue the omission of dimensions presents. If a PC is omitted that is unrelated to the response variable, PC-regression will underestimate the residual variance relative to OLS, and the variance of the PC-regression estimator of the regression slopes will be smaller than the variance of the OLS estimator. If a PC is omitted that is important in explaining the response variable, PC-regression will result in lack of fit, the residual will depend on the predictors, and as a consequence the residual variance will be overestimated. Consequently, the magnitude of the variance of the PC-regression estimator for the regression slopes depends on the importance of the omitted PCs in explaining the response variable, which cannot be assessed from the eigenvalues of the PCA. Fortunately, better strategies for determining relevant PCs to include in a regression exist (Mardia, Kent, and Bibby 1980; Hastie, Friedman, and Tibshirani 2001), while ridge regression (Bair et al. 2006; Frank and Friedman 1993; Dormann et al. 2013) and Partial Least Squares (Dormann et al. 2013; Liland, Mevik, and Wehrens 2022) represent less ambiguous and more rigorous methodologies that can be used instead of PC-regression.

Acknowledgements
The writing of this short note was motivated by the lack of nuance on the variance of the PC-regression estimator in the associated Wikipedia article (wikipedia.org/wiki/Principal_component_regression). I would like to thank Erik Blystad Solbu and Robert Brian O'Hara for comments on an earlier draft of the manuscript.

References
Artigue, Heidi, and Gary Smith. 2019. "The Principal Problem with Principal Components Regression." Edited by Zudi Lu. Cogent Mathematics & Statistics 6 (1): 1622190. https://doi.org/10.1080/25742558.2019.1622190.
Bair, Eric, Trevor Hastie, Debashis Paul, and Robert Tibshirani. 2006. "Prediction by Supervised Principal Components." Journal of the American Statistical Association 101 (473): 119-37.
Dormann, Carsten F., Jane Elith, Sven Bacher, Carsten Buchmann, Gudrun Carl, Gabriel Carré, Jaime R. García Marquéz, et al. 2013. "Collinearity: A Review of Methods to Deal with It and a Simulation Study Evaluating Their Performance." Ecography 36 (1): 27-46.
Frank, Ildiko E., and Jerome H. Friedman. 1993. "A Statistical View of Some Chemometrics Regression Tools." Technometrics 35 (2): 109-35. https://doi.org/10.1080/00401706.1993.10485033.
Hadi, Ali S., and Robert F. Ling. 1998. "Some Cautionary Notes on the Use of Principal Components Regression." The American Statistician 52 (1): 15-19. https://doi.org/10.2307/2685559.
Hastie, Trevor, Jerome Friedman, and Robert Tibshirani. 2001. The Elements of Statistical Learning. Springer Series in Statistics. New York, NY: Springer. https://doi.org/10.1007/978-0-387-21606-5.
Hotelling, Harold. 1957. "The Relations of the Newer Multivariate Statistical Methods to Factor Analysis." British Journal of Statistical Psychology 10 (2): 69-79. https://doi.org/10.1111/j.2044-8317.1957.tb00179.x.
Jolliffe, Ian T. 1982. "A Note on the Use of Principal Components in Regression." Journal of the Royal Statistical Society. Series C (Applied Statistics) 31 (3): 300-303. https://doi.org/10.2307/2348005.
Liland, Kristian Hovde, Bjørn-Helge Mevik, and Ron Wehrens. 2022. Pls: Partial Least Squares and Principal Component Regression. Manual. https://CRAN.R-project.org/package=pls.
Mardia, Kanti V., J. T. Kent, and J. M. Bibby. 1980. Multivariate Analysis. 1st edition. London; New York: Academic Press.
Næs, Tormod, and Harald Martens. 1988. "Principal Component Regression in NIR Analysis: Viewpoints, Background Details and Selection of Components." Journal of Chemometrics 2 (2): 155-67.
Pearson, Karl. 1901. "LIII. On Lines and Planes of Closest Fit to Systems of Points in Space." The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 2 (11): 559-72. https://doi.org/10.1080/14786440109462720.

Appendix

Appendix S1

var(\hat{\beta}_d) = \{(X - U_k \Sigma_k V_k^\top)^\top (X - U_k \Sigma_k V_k^\top)\}^{-1} \sigma^2_{PC,d}
                   = (V_d \Sigma_d U_d^\top U_d \Sigma_d V_d^\top)^{-1} \sigma^2_{PC,d}
                   = (V_d \Sigma_d \Sigma_d V_d^\top)^{-1} \sigma^2_{PC,d}
                   = V_d \Sigma_d^{-1} \Sigma_d^{-1} V_d^\top \sigma^2_{PC,d}
                   = V_d V_d^\top (X^\top X)^{-1} V_d V_d^\top \sigma^2_{PC,d}
                   = (X^\top X)^{-1} V_d V_d^\top V_d V_d^\top \sigma^2_{PC,d}
                   = (X^\top X)^{-1} V_d V_d^\top \sigma^2_{PC,d}
                   = var(\hat{\beta}) V_d V_d^\top \frac{\sigma^2_{PC,d}}{\sigma^2}.    (14)

Appendix S2

var(\hat{\beta}_d) = var(\hat{\beta}) (I_K - V_k V_k^\top) \frac{\sigma^2_{PC,d}}{\sigma^2}
                   = var(\hat{\beta}) (I_K - X^\top X V_k \Sigma_k^{-2} V_k^\top) \frac{\sigma^2_{PC,d}}{\sigma^2}
                   = \{var(\hat{\beta}) - var(\hat{\beta}) X^\top X V_k \Sigma_k^{-2} V_k^\top\} \frac{\sigma^2_{PC,d}}{\sigma^2}
                   = \{var(\hat{\beta}) - (X^\top X)^{-1} \sigma^2 X^\top X V_k \Sigma_k^{-2} V_k^\top\} \frac{\sigma^2_{PC,d}}{\sigma^2}
                   = \{var(\hat{\beta}) - \sigma^2 V_k \Sigma_k^{-2} V_k^\top\} \frac{\sigma^2_{PC,d}}{\sigma^2}
                   = \{var(\hat{\beta}) - \frac{\sigma^2}{\sigma^2_{PC,k}} var(\hat{\beta}_k)\} \frac{\sigma^2_{PC,d}}{\sigma^2}.    (15)
diff --git a/OdAzT4oBgHgl3EQflP0i/content/tmp_files/load_file.txt b/OdAzT4oBgHgl3EQflP0i/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2420bfbe2423dc9c8170640d40b6524f3145fa6b
--- /dev/null
+++ b/OdAzT4oBgHgl3EQflP0i/content/tmp_files/load_file.txt
@@ -0,0 +1,181 @@
+filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf,len=180
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' The diagonal entries of the outer product of the first d right singular vectors of X are thus equal the proportional variance decrease by omitting k left singular vectors of the variance for the OLS estimator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' This result emphasizes that the variance of the PC-regression estimator decreases monotonically in the number of left singular vectors used to approximate the OLS estimator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' However, since ˆ σ2 P C,d increases in the number of omitted left singular vectors k, the variance of the PC-regression estimator does not need to be lower than that of the OLS estimator, unless some eigenvalues are zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Finally, the variance of the PC-regression can also be written as a difference between the variance of the estimators of a regression with the predictors, and that of the estimator for a regression of the remaining k left singular vectors: var(ˆβd) = var(ˆβ)(IK − V kV ⊤ k ) σ2 P C,d σ2 = {var(ˆβ) − σ2 σ2 P C,k var(ˆβk)} σ2 P C,d σ2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' (13) consequently, we have var(ˆβ) = var(ˆβd) σ2 σ2 P C,d +var(ˆβk) σ2 σ2 P C,k (see appendix S2 for an expanded proof).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Here, 5 σ2/σ2 P C,k ≤ 1 increases in k and converges to one as k → r and is zero at d = r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Consequently, the PC-regression estimator studied here has sampling distribution ˆβd ∼ N{β − βk, var(ˆβ)V dV ⊤ d σ2 P C,d σ2 }, or alternatively, ˆβd ∼ N{(X⊤X)−1V V ⊤y, var(ˆβ) σ2 P C,d σ2 − var(ˆβk) σ2 P C,d σ2 P C,k }.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Concluding remarks Similarly to Hadi and Ling (1998), I conclude that omitting dimensions as in PC-regression is likely to result in lack of fit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Various authors have pointed out that it is important to consider the relationship of PCs with the response variable, and that it is not the magnitude of the associated eigenvalue that should dictate whether a PC is included in a PC-regression (Hadi and Ling 1998;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Jolliffe 1982;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Artigue and Smith 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' This is further supported by results due to various expressions in this article, but by equation (6) in particular.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' It is the relationship of PCs with the response variable that determines how significant an issue omitting dimensions presents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' If a PC is omitted that is unrelated to the response variable, PC-regression will underestimate the residual variance relative to OLS, and the variance of the PC-regression estimator of the regression slopes will be smaller than the variance of the OLS estimator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' If a PC is omitted that is important in explaining the response variable, PC-regression will result in lack of fit, the residual will depend on the predictors, and as a consequence the residual variance will be overestimated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Consequently, the magnitude of the variance of the PC-regression estimator for the regression s lopes depends on the importance of the omitted PCs in explaining the response variable, which cannot be assessed from the eigenvalues of the PCA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Fortunately, better strategies for determining relevant PCs to include in a regression exist (Mardia, Kent, and Bibby 1980;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Hastie, Friedman, and Tibshirani 2001), while ridge regression (Bair et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2006;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Frank and Friedman 1993;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Dormann et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2013) or Partial Least Squares (Dormann et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2013;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Liland, Mevik, and Wehrens 2022) represent less ambiguous and more rigorous methodologies that can be used instead of PC-regression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Acknowledgements The writing of this short note was motivated by the lack of nuance on the variance of the PC-regression es- timator in the associated Wikipedia article (wikipedia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/wiki/Principal_component_regression).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' I would like to than Erik Blystad Solbu and Robert Brian O’Hara for comments on an earlier draft of the manuscript.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 6 References Artigue, Heidi, and Gary Smith.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “The Principal Problem with Principal Components Regression.” Edited by Zudi Lu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Cogent Mathematics & Statistics 6 (1): 1622190.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1080/25742558.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1622190.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Bair, Eric, Trevor Hastie, Debashis Paul, and Robert Tibshirani.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “Prediction by Supervised Principal Components.” Journal of the American Statistical Association 101 (473): 119–37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Dormann, Carsten F, Jane Elith, Sven Bacher, Carsten Buchmann, Gudrun Carl, Gabriel Carré, Jaime R García Marquéz, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “Collinearity: A Review of Methods to Deal with It and a Simulation Study Evaluating Their Performance.” Ecography 36 (1): 27–46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Frank, lldiko E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=', and Jerome H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Friedman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1993.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “A Statistical View of Some Chemometrics Regression Tools.” Technometrics 35 (2): 109–35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1080/00401706.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1993.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='10485033.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Hadi, Ali S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=', and Robert F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Ling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1998.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “Some Cautionary Notes on the Use of Principal Components Regression.” The American Statistician 52 (1): 15–19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='2307/2685559.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Hastie, Trevor, Jerome Friedman, and Robert Tibshirani.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' The Elements of Statistical Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Springer Series in Statistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' New York, NY: Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1007/978-0-387-21606-5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Hotelling, Harold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1957.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “The Relations of the Newer Multivariate Statistical Methods to Factor Analysis.” British Journal of Statistical Psychology 10 (2): 69–79.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1111/j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='2044-8317.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1957.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='tb00179.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Jolliffe, Ian T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1982.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “A Note on the Use of Principal Components in Regression.” Journal of the Royal Statistical Society.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Series C (Applied Statistics) 31 (3): 300–303.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='2307/2348005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Liland, Kristian Hovde, Bjørn-Helge Mevik, and Ron Wehrens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Pls: Partial Least Squares and Principal Component Regression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Manual.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://CRAN.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='R-project.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/package=pls.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Mardia, Kanti V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=', J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Kent, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Bibby.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1980.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Multivariate Analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1st edition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' London ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' New York: Academic Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Næs, Tormod, and Harald Martens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “Principal Component Regression in NIR Analysis: Viewpoints, Background Details and Selection of Components.” Journal of Chemometrics 2 (2): 155–67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' Pearson, Karl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 1901.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' “LIII.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' On Lines and Planes of Closest Fit to Systems of Points in Space.” The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science 2 (11): 559–72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='org/10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content='1080/14786440109462720.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' 7 Appendix Appendix S1 var(ˆβd) = (X − U kΣkX⊤ V ,k)⊤(X − U kΣkX⊤ V ,k)−1σ2 P C,d = (V dΣdu⊤ d udΣdV ⊤ d )−1σ2 P C,d = (V dΣdΣdV ⊤ d )−1σ2 P C,d = V dΣ−1 d Σ−1 d V ⊤ d σ2 P C,d = V dV ⊤ d (X⊤X)−1V dV ⊤ d σ2 P C,d = (X⊤X)−1V dV ⊤ d V dV ⊤ d σ2 P C,d = (X⊤X)−1V dV ⊤ d σ2 P C,d = var(ˆβ)V dV ⊤ d σ2 P C,d σ2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' (14) Appendix S2 var(ˆβd) = var(ˆβ)(IK − V kV ⊤ k ) σ2 P C,d σ2 = var(ˆβ)(IK − X⊤XV kΣ−2 k V ⊤ k ) σ2 P C,d σ2 = {var(ˆβ) − var(ˆβ)X⊤XV kΣ−2 k V ⊤ k } σ2 P C,d σ2 = {var(ˆβ) − (X⊤X)−1σ2X⊤XV kΣ−2 k V ⊤ k } σ2 P C,d σ2 = {var(ˆβ) − σ2V kΣ−2 k V ⊤ k } σ2 P C,d σ2 = {var(ˆβ) − σ2 σ2 P C,k var(ˆβk)} σ2 P C,d σ2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} +page_content=' (15) 8' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/OdAzT4oBgHgl3EQflP0i/content/2301.01543v1.pdf'} diff --git a/PtE3T4oBgHgl3EQfxwuM/content/tmp_files/2301.04714v1.pdf.txt b/PtE3T4oBgHgl3EQfxwuM/content/tmp_files/2301.04714v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..4c62c4b6845c10c75de17726ba5ae348887543bb --- /dev/null +++ b/PtE3T4oBgHgl3EQfxwuM/content/tmp_files/2301.04714v1.pdf.txt @@ -0,0 +1,1261 @@ +arXiv:2301.04714v1 [math.AT] 11 Jan 2023 +RO(C2)-GRADED COHOMOLOGY OF C2-EQUIVARIANT +EILENBERG-MAC LANE SPACES +U ˘GUR Y˙I˘G˙IT +Abstract. In this paper, we calculate RO(C2)-graded cohomology of C2- +equivariant Eilenberg-Mac Lane spaces K(Z/2, n + σ) for n ≥ 0. These can be +used to give the relation between equivariant lambda algebra and equivariant +Adams resolution and equivariant unstable Adams spectral sequence, which +are defined in author‘s dissertation. +Contents +1. +Introduction +1 +2. +Preliminaries +2 +3. +C2-Equivariant Steenrod Algebra +6 +4. +Equivariant Eilenberg-Mac Lane Spaces +11 +5. +Cohomology of Eilenberg-Mac Lane Spaces +13 +References +16 +1. Introduction +An ordinary cohomology theory H⋆ +G(− : M) on G-spaces with Mackey functor +M coefficients and graded by real orthogonal representations is defined by Lewis, +May and Mcclure [8]. In this paper, we compute the RO(C2)-graded cohomology +of the C2-equivariant Eilenberg-Mac Lane spaces with the constant Mackey functor +M = Z/2 coefficients, which are crucial to give the relation between the equivari- +ant lambda algebra and the equivariant unstable Adams resolution and equivariant +unstable Adams spectral sequence, which is given by Mahowald [12] in the clas- +sical case. Throughout this paper, H⋆(−) denotes the ordinary RO(C2)-graded +cohomology of a C2-space with the constant Mackey functor coefficients Z/2. +To compute the RO(C2)-graded cohomology of the C2-equivariant Eilenberg- +Mac Lane spaces with the constant Mackey functor M = Z/2 coefficients, we use +Borel theorem 17 for the path-space fibration +ΩK(Z/2, V ) −→ P(K(Z/2, V )) −→ K(Z/2, V ). +for V = σ + n, where n ≥ 0. +Key words and phrases. Equivariant Cohomology, Equivariant Steenrod algebra, Equivariant +Eilenberg-Mac Lane Spaces. 
+1 + +2 +U ˘GUR Y˙I ˘G˙IT +If we knew H⋆(K(Z/2, nσ)) for n ≥ 2, one could use the Eilenberg-Moore spec- +tral sequence [4, Chapter 5], the Borel theorem, and the RO(G)-graded Serre spec- +tral sequence of Kronholm [7, Theorem 1.2.] for the path-space fibration +ΩK(Z/2, V ) −→ P(K(Z/2, V )) −→ K(Z/2, V ). +This paper is organized as follows. In section 2, we provide the basic equivariant +topology tools, and C2-equivariant cohomology M C2 +2 +of a point, and equivariant +connectivity of G-spaces. In section 3, we descripe equivariant Steenrod squares, +C2-equivariant Steenrod algebra AC2 and axioms of it. In section 4, we give the +definition of the equivariant Eilenberg-Mac Lane spaces with some properties, and +the fixed point sets of the equivariant Eilenberg-Mac Lane spaces that is very useful +to compute the cohomology of them. In section 5, we compute the RO(C2)-graded +C2-equivariant cohomology of some C2-equivariant Eilenberg-Mac Lane spaces KV +for real orthogonal representations V = σ+n, n ≥ 0. Also, we give some conjectures +and future directions for the other cases. +Notation. We provide here notation used in this paper for convenience. +• V = rσ+s, a real orthogonal representation of C2, which is a sum of r-copy +of the sign representation σ and s-copy of the trivial representation 1. +• ρ = σ + 1, the regular representation of C2. +• RO(C2), the real representation ring of C2. +• SV , the equivariant sphere which is the one-point compactification of V . +• πC2 +V (X), the V -th C2-equivariant homotopy group of a topological C2-space +X. +• πS +rσ+s, the C2-equivariant stable homotopy groups of spheres. +• Σσ(X), the σ-th suspension of X. +• Ωσ(X), all continuous functions from Sσ to X. +• H⋆ +G(− : M), RO(G)-graded ordinary equivariant cohomology with Mackey +functor M coefficients. +• M C2 +2 , RO(C2)-graded C2-equivariant cohomology of a point. +• AC2, C2-equivariant Steenrod algebra. +• K(M, V ) or shortly KV , the V th equivariant Eilenberg-Mac Lane space +with a Mackey functor M. +• πG +V (X), C2-equivariant homotopy of a G-space X as a Mackey functor. +• Sqk +C2, C2-equivariant Steenrod squaring operations for k ≥ 0. +• RP ∞ +tw, the space of lines in the complete universe U = (Rρ)∞, which is +equivalent to K(Z/2, σ). +Acknowledgements. I would like to thank Michael A. Hill for valuable conversa- +tions and providing me some suggestions for calculations, and William Kronholm for +producing the action of Steenrod squares on the cohomology ring of RP ∞ +tw. Lastly +and most importanly, I would like to state my gratitude to my advisor, Douglas +C. Ravenel, for his patience, support, and encouragement throughout my graduate +studies, and numerous beneficial conversations and suggestions. The work in this +paper was part of the author’s dissertation while at the University of Rochester. +2. Preliminaries +In this section we give the main tools that are used in the rest of the article. +Let X be a G-space, where G = C2 is a cyclic group with generator γ such that +γ2 = e. The group C2 has two irreducible real representations, namely the trivial + +COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES +3 +representation denoted by 1 (or R) and the sign representation denoted by σ (or +R−). The regular representation is isomorphic to ρC2 = 1 + σ (it is denoted by ρ if +there is no confusion). Thus the representation ring RO(C2) is free abelian of rank +2, so every representation V can be expressed as V = rσ + s. +Definition 1. 
A G-universe is a countably infinite-dimensional G-representation +which contains the trivial G-representation and which contains infinitely many +copies of each of its finite-dimensional subrepresentations. +Also, a +complete +G-universe is just a G-universe that contains infinitely many copies of every irre- +ducible G-representation. +Definition 2. A G-spectrum E on a G-universe U is a collection EV of based +G-spaces together with basepoint-preserving G-maps +σV,W : ΣW−V EV −→ EW +whenever V ⊂ W ⊂ U, where W − V denotes the orthogonal complement of V in +W. It is required that σV,V is identity, and the commutativity of the diagram +ΣW−V ΣV −UEU +ΣW−V EV +EW +ΣW −V σU,V +σU,W +σV,W +for U ⊂ V ⊂ W ⊂ U. +Definition 3. If the adjoint structure maps +˜σV,W : EV −→ ΩW−V EW +are weak homotopy equivalences for V ⊂ W ⊂ U, then a G-spectrum is called +G − Ω-spectrum. +A G-spectrum indexed on a complete(trivial) G-universe is called genuine(naive). +For an actual representation V of G and a G-space X, the V -th homotopy group +of X is the Mackey functor πV (X) determined by +πV (X)(G/H) = [SV , X]H +for every H < G. +For a virtual representation V ∈ RO(G) and a G-spectrum E, the V -th homo- +topy group of E is the Mackey functor πV (E) determined by +πV (E)(G/H) = colimnπ0(ΩV +WnEWn)H +where {Wn|n ∈ N} is an increasing sequence of representations +· · · ⊂ Wn ⊂ Wn+1 ⊂ · · · +such that any finite dimensional representation V of G admits an equivariant em- +bedding in some Wn. +Lewis, May and Mcclure [8] defined an ordinary cohomology theory H⋆ +G(− : M) +on G-spaces with Mackey functor M coefficients and the graded by real orthogonal +representations. + +4 +U ˘GUR Y˙I ˘G˙IT +Throughout this paper, the Mackey functor will typically be the constant Mackey +functor M = Z/2, which can be given the following diagram in Lewis notation. +(2.1) +Z/2 +Id � +Z/2 +Id +� +0 +� +The ordinary equivariant cohomology M C2 +2 +of a point with this coefficient is +given in the Figure 1 below. Every • in the figure represents a copy of Z/2. +As you see in the Figure 1 below, there are two elements of interest. The inclusion +map of the fixed point set (the north and south poles) a : S0 −→ Sσ defines an +element in πC2 +−σ(S−0), and we will use the same symbol for its mod 2 Hurewicz +image. It is called an Euler class. One can show that +HC2 +1 (Sσ; Z/2) = HC2 +1−σ(S−0; Z/2) = Z/2 +and we denote its generator by u. Dually, we have a ∈ Hσ +C2(S−0; Z/2) and u ∈ +Hσ−1 +C2 (S−0; Z/2). These are the analog of elements ρ and τ in real motivic homotopy +theory, respectively. +−6 +−4 +−2 +0 +2 +4 +6 +−6 +−4 +−2 +0 +2 +4 +6 +1 +u +u2 +a +ua +a2 +θau +θ +au2 +θ +a2u +Figure 1. The equivariant cohomology M C2 +2 +of a point +The coordinate (x, y) represents degree (x − y) + σy, which is convenient with +the motivic bidegree. +Red and blue lines represent multiplication by u and a, +respectively. +Now, we will give the definition of equivariant connectivity of G-spaces. + +COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES +5 +Definition 4. [10] +(i) A function ν∗ from the set of conjugacy classes of subgroups of G to the +integers is called a dimension function. The value of ν∗ on the conju- +gacy class of K ⊂ G is denoted by νK. Let ν∗ and µ∗ be two dimension +functions. If νK ≥ µK for every subgroup K, then ν∗ ≥ µ∗. Associated +to any G-representation V is the dimension function |V ∗| whose value at +K is the real dimension of the K-fixed subspace V K of V . 
The dimension +function with constant integer value n is denoted n∗ for any integer n. +(ii) Let ν∗ be a non-negative dimension function. +If for each subgroup K +of G, the fixed point space Y K is νK-connected, then a G-space Y is +called G-ν∗-connected. +If A G-space Y is G-0∗-connected, then it is +called G-connected. +Also, if it is G-1∗-connected, it is called simply +G-connected. A G-space Y is homologically G-ν∗-connected if, for +every subgroup K of G and every integer m with 0 ≤ m ≤ νK , the homol- +ogy group HK +m(Y ) is zero. +(iii) Let ν∗ be a non-negative dimension function and let f : Y −→ Z be a +G-map. If, for every subgroup K of G, +(f K)∗ : πm(Y k) −→ πm(ZK) +is an isomorphism for every integer m with 0 ≤ m < νK and an epi- +morphism for m = νK, then f is called G-ν∗-equivalence. +A G-pair +(Y, B) is said to be G-ν∗-connected if the inclusion of B into Y is a +G-ν∗-equivalence. The notions of homology G-ν∗-equivalence and of +homology G-ν∗-connectedness for pairs are defined similarly, but with +homotopy groups replaced by homology groups. +(iv) Let V be a G-representation. For each subgroup K of G, let V (K) be +the orthogonal complement of V K ; then V (K) is a K-representation. If +πK +V (K)+m(Y ) is zero for each subgroup K of G and each integer m with +0 ≤ m ≤ |V K|, the G-space Y is called G-V -connected. +Similarly, +if HG +V (K)+m(Y ) is zero for each subgroup K of G and each integer m +with 0 ≤ m ≤ |V K|, then the G-space Y is called homologically G- +V -connected. +(v) Let V be a G-representation. A G-0∗-equivalence f : Y −→ Z is said to be +a G-V -equivalence if, for every subgroup K of G, the map +f∗ : πK +V (K)+m(Y ) −→ πK +V (K)+m(Z) +is an isomorphism for every integer m with 0 ≤ m < |V K| and an epimor- +phism for m = |V K|. A homology G-V -equivalence is defined similarly. +A G-pair (Y, B) is called G-V -connected (respectively, homologically +G-V -connected) if the inclusion of B into Y is a G-V -equivalence (re- +spectively, homology G-V -equivalence). + +6 +U ˘GUR Y˙I ˘G˙IT +3. C2-Equivariant Steenrod Algebra +The analog of the mod 2 Steenrod algebra is defined by Voevodsky [19] in the +motivic case, and Po Hu and Igor Kriz [5] in the equivariant case. The two descrip- +tions are essentially the same. +One has squaring operations Sqk +C2 for k ≥ 0, whose degrees +|Sqk +C2| = +� +i(1 + σ) +for k = 2i +i(1 + σ) + 1 +for k = 2i + 1. +Sq0 +C2 = 1 as in the classical case. The C2-equivariant Steenrod algebra acts on +the coefficient ring M C2 +2 +by +(3.1) +Sqk +C2(u) = + + + +u +for k = 0 +a +for k = 1 +0 +else. +(3.2) +Sq2m+δ +C2 +(u2l+ǫ) = +� 2l + ǫ +2m + δ +� +u2l+ǫ−m−δa2m+δ +The difficulty in deriving the formula 3.2 is the C2-equivariant Cartan formula 3.7, +3.8. Since +|Sq2m+δ +C2 +| = m(1 + σ) + δ +for 0 ≤ δ ≤ 1, +we have +(3.3) + + +∆(Sq2m+1 +C2 +) = � +0≤i≤2m+1 Sqi +C2 ⊗ Sq2m+1−i +C2 +∆(Sq2m +C2 ) = � +0≤j≤m Sq2i +C2 ⊗ Sq2m−2j +C2 ++ u � +1≤j≤m Sq2j−1 +C2 +⊗ Sq2m−2j+1 +C2 +. +The terms divisible by u make things difficult. Here we are using cohomological +degree, so |u| = σ − 1. Note that +|u−mSq2m+δ +C2 +| = m(1 − σ) + m(1 + σ) + δ = 2m + δ +and define +Sq2m+δ := u−mSq2m+δ +C2 +. +We will see that these operations satisfy the classical Cartan formula. We have +∆(Sq2m+1) = u−m∆(Sq2m+1 +C2 +) += u−m +� +0≤i≤2m+1 +Sqi +C2 ⊗ Sq2m+1−i +C2 += +� +0≤i≤2m+1 +u−⌊i/2⌋Sqi +C2 ⊗ u−⌊(2m+1−i)/2⌋Sq2m+1−i +C2 += +� +0≤i≤2m+1 +Sqi +C2 ⊗ Sq2m+1−i +C2 +since ⌊i/2⌋ + ⌊(2m + 1 − i)/2⌋ = m. 
And also, +∆(Sq2m) = u−m∆(Sq2m +C2 ) + +COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES +7 += u−m +� +0≤j≤m +Sq2j +C2 ⊗ Sq2m−2j +C2 ++ u1−m +� +1≤j≤m +Sq2j−1 +C2 +⊗ Sq2m−2j+1 +C2 += +� +0≤j≤m +u−jSq2j +C2 ⊗ uj−mSq2m−2j +C2 ++ +� +1≤j≤m +u1−jSq2j−1 +C2 +⊗ uj−mSq2m−2j+1 +C2 += +� +0≤j≤m +Sq2j +C2 ⊗ Sq2m−2j +C2 ++ +� +1≤j≤m +Sq2j−1 +C2 +⊗ Sq2m−2j+1 +C2 += +� +0≤i≤2m +Sqi +C2 ⊗ Sq2m−i +C2 +. +Now, if we use homological degree, then +|Sqm| = −m, |a| = −σ, and |u| = 1 − σ. +We know that +(3.4) +Sqm +C2(u) = + + + +u +for m = 0 +a +for m = 1 +0 +else. +Consider the total Steenrod operation +(3.5) +Sqt = +� +i≥0 +tiSqi, +where t is a dummy variable. Although this sum is infinite, it yields a finite sum +when applied to any monomial in a and u. The classical Cartan formula satisfied +by operations Sqi implies that it is a ring homomorphism, meaning that +Sqt(xy) = Sqt(x)Sqt(y). +Then 3.4 implies that +Sqt(u) = u + ta +Sqt(ul) = (u + ta)l += +� +0≤m≤l +� l +m +� +tmul−mam += +� +0≤m≤l +tmSqm(ul). +Hence, Sqm(ul) is the coefficient of tm in the first sum above. +It follows that +Sq2m+δ(u2l+ǫ) = +� 2l + ǫ +2m + δ +� +u2l+ǫ−2m−δa2m+δ +Sq2m+δ(u2l+ǫ) = umSq2m+δ(u2l+ǫ) += +� 2l + ǫ +2m + δ +� +u2l+ǫ−m−δa2m+δ. +As a result, we have the following: +Lemma 5. +Sq2m+δ +C2 +(u2l+ǫ) = +� 2l + ǫ +2m + δ +� +u2l+ǫ−m−δa2m+δ. + +8 +U ˘GUR Y˙I ˘G˙IT +The natural action of the Steenrod algebra in homology is on the right, not on +the left. Classically, the +mod p cohomology of a space or a spectrum X is a left +module over the Steenrod algebra A, so there is a map +cX : A ⊗ H∗X → H∗X. +The Steenrod algebra has a multiplication +φ∗ : A ⊗ A → A +(the symbol φ∗ and its dual φ∗ are taken from Milnor’s paper [14]) and the following +diagram commutes +(3.6) +A ⊗ A ⊗ H∗X +A ⊗ H∗X +A ⊗ H∗X +H∗X. +φ∗⊗H∗X +A⊗cX +cX +cX +Milnor defines a right action of A on H∗X by the rule +⟨xa, y⟩ = ⟨x, ay⟩ ∈ Fp +for x ∈ H∗X, a ∈ A and y ∈ H∗X, where the brackets denotes the evaluation of +the cohomology class on the right on the homology class on the left. Milnor denotes +by λ∗ the resulting map +H∗X ⊗ A → H∗X. +The same thing happens in the C2-equivariant case. For example, we have +(u2)Sq3 +C2 = (u2)Sq1 +C2Sq2 +C2 = 0 +because (u2)Sq1 +C2 = 0. And, +(u2)χ(Sq3 +C2) = (u2)Sq2 +C2Sq1 +C2 = (ua2)Sq1 +C2 = a3, +where χ(−) means the conjugate Steenrod operations. Hence, 3.2 should really +read as +(u2l+ǫ)Sq2m+δ +C2 += +� 2l + ǫ +2m + δ +� +u2l+ǫ−m−δa2m+δ. +For example, +Sql +C2(u−1) = +�−1 +l +� +alu−1−l += + + + + + + + + + + + + + + + + + + + +�−1 +0 +� +u−1 = u−1 +for l = 0 +�−1 +1 +� +au−2 = au−2 +for l = 1 +�−1 +2 +� +a2u−3 = a2u−3 +for l = 2 +0 +for l ≥ 3 +Action on the other elements is determined by the Cartan formula (iv) given +below. We now give axioms for the squares Sqk +C2. For the motivic case, you can +check Voevodsky paper [19]. But, the Adem relation is fixed by Jo¨el Riou in [17]. +(i) Sq0 +C2 = 1 and Sq1 +C2 = βC2, Bockstein homomorphism. +(ii) βSq2k +C2 = Sq2k+1 +C2 +. + +COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES +9 +(iii) βSq2k+1 +C2 += 0. 
+(iv) (Cartan formula) +(3.7) +Sq2k +C2(xy) = +k +� +r=0 +Sq2r +C2(x)Sq2k−2r +C2 +(y) + u +k−1 +� +s=0 +Sq2s+1 +C2 +(x)Sq2k−2s−1 +C2 +(y) +(3.8) +Sq2k+1 +C2 +(xy) = +2k+1 +� +r=0 +Sqr +C2(x)Sq2k+1−r +C2 +(y) + a +k−1 +� +s=0 +Sq2s+1 +C2 +(x)Sq2k−2s−1 +C2 +(y) +(v) (Adem relation) If 0 < i < 2j, then when i + j is even +Sqi +C2Sqj +C2 = +[i/2] +� +k=0 +�b − 1 − k +i − 2k +� +uǫSqi+j−k +C2 +Sqk +C2 +where +ǫ = +� +1 +for k is odd and i and j are even +0 +else +when i + j is odd +Sqi +C2Sqj +C2 = +[i/2] +� +k=0 +�j − 1 − k +i − 2k +� +Sqi+j−k +C2 +Sqk +C2 + a +� +k=odd +ε Sqi+j−k +C2 +Sqk +C2 +where +ε = +� �j−1−k +i−2k +� +for i is odd +� j−1−k +i−2k−1 +� +for j is odd +(vi) If x has a degree kσ + k, then Sq2k +C2(x) = x2. +(vii) (instability) If x has a degree V , V < kσ + k then Sq2k +C2(x) = 0, where +V < V +′ if and only if V +′ = V + W for some actual representations W with +positive degree. +Note that setting u = 1 and a = 0 reduces the Cartan formula (iv) to the classical +Cartan formula, and Adem relation (v) to the classical Adem relation. +Examples 6. We have +Sq1 +C2Sqn +C2 = +� +Sqn+1 +C2 +for n is even +0 +for n is odd +Sq2 +C2Sqn +C2 = + + + + + + + +Sqn+2 +C2 ++ uSqn+1 +C2 Sq1 +C2 +for n ≡ 0 mod 4 +Sqn+1 +C2 Sq1 +C2 +for n ≡ 1 mod 4 +uSqn+1 +C2 Sq1 +C2 +for n ≡ 2 mod 4 +Sqn+2 +C2 ++ Sqn+1 +C2 Sq1 +C2 +for n ≡ 3 mod 4 +and +Sq3 +C2Sqn +C2 = + + + + + + + +Sqn+3 +C2 ++ aSqn+1 +C2 Sq1 +C2 +for n ≡ 0 mod 4 +Sqn+2 +C2 Sq1 +C2 +for n ≡ 1 mod 4 +aSqn+1 +C2 Sq1 +C2 +for n ≡ 2 mod 4 +Sqn+2 +C2 Sq1 +C2 +for n ≡ 3 mod 4 + +10 +U ˘GUR Y˙I ˘G˙IT +Now, let SqI +C2 denote Sqi1 +C2Sqi2 +C2 · · · Sqin +C2 for a sequence of integers I = (i1, · · · , in). +The sequence I is said to be admissible if is ≥ 2is+1 for all s ≥ 1, where is+1 = 0. +The operations SqI +C2 with admissible I are called admissible monomials. We also +call Sq0 +C2 admissible, where Sq0 +C2 = SqI +C2 for empty I. +Lemma 7. The admissible monomials form a basis for the C2-equivariant Steenrod +algebra AC2 as a H⋆(pt)-module. +Proof. The proof follows from the Adem relations and the Cartan formula as in the +classical case. +□ +For the graded AC2-module structure and Hopf algebra structure of equivariant +Steenrod algebra, one can look [16]. We will now give unstable module structure +of it. +Definition 8. An AC2-module is unstable if it satisfies the preceeding instability +condition (vii). +We define the excess of Sqk +C2 to be the degree of Sqk +C2 +e(Sqk +C2) = +� +iρ +for k = 2i +iρ + 1 +for k = 2i + 1. +So, e(Sqk +C2) = |Sqk +C2|. Then the excess of SqI +C2 = Sqi1 +C2Sqi2 +C2 · · · Sqik +C2 to be +e(SqI +C2) = +� +j +e(Sqij +C2) − ρe(Sqij+1 +C2 ) +where ρ(rσ + s) = (r + s)ρ. +Examples 9. +• The monomial with e(SqI +C2) = 0 is Sq0 +C2. +• The monomials with e(SqI +C2) = 1 are Sq1 +C2, Sq2 +C2Sq1 +C2, Sq4 +C2Sq2 +C2Sq1 +C2, · · · +• There is no monomial with e(SqI +C2) = σ. +• The monomials with e(SqI +C2) = 2 are Sq3 +C2Sq1 +C2, Sq6 +C2Sq3 +C2Sq1 +C2, Sq12 +C2Sq6 +C2Sq3 +C2- +Sq1 +C2, · · · +• The monomials with e(SqI +C2) = ρ are Sq2 +C2, Sq4 +C2Sq2 +C2, Sq8 +C2Sq4 +C2Sq2 +C2, · · · +• There is no monomial with e(SqI +C2) = 2σ, +• The monomials with e(SqI +C2) = 3 are Sq7 +C2Sq3 +C2Sq1 +C2, Sq11 +C2Sq5 +C2Sq2 +C2Sq1 +C2, +· · · +• The monomials with e(SqI +C2) = 2 + σ are Sq3 +C2, Sq4 +C2Sq1 +C2, Sq5 +C2Sq2 +C2, +Sq6 +C2Sq3 +C2, Sq6 +C2Sq2 +C2Sq1 +C2, Sq8 +C2Sq4 +C2Sq1 +C2, · · · +• There is no monomial with e(SqI +C2) = 1 + 2σ. +Remark 10. There is no monomial with e(SqI +C2) = rσ + s if r > s. 
+Let tj,k = Sqj2k−1 +C2 +· · · Sqj +C2. Then the set of elements with total excess 1 is +{t1,k1|k1 > 0} . +The set of elements with total excess 2 is +� +t1+2k1 ,k2+1t1,k1|k1, k2 ≥ 0 +� +. + +COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES +11 +The set of elements with total excess 3 is +� +t1+2k2 +2k1+k2 ,k3+1t1+2k1 ,k2t1,k1|k1, k2, k3 ≥ 0 +� +. +The C2-equivariant mod 2 dual Steenrod algebra (one can check [16], or [5] for +details) is +AC2 = M C2 +2 [τi, ξi]/(τ 2 +i + aτi+1ηR(u)ξi+1) +such that +ηR(u) = u + aτ0 +ηR(a) = a +|ξi| = (2i − 1)ρ +|τi| = 1 + |ξi| +∆(ξi) = +i +� +j=0 +ξ2j +i−j ⊗ ξj, where ξ0 = 1 +∆(τi) = τi ⊗ 1 + +i +� +j=0 +ξ2j +i−j ⊗ τj. +4. Equivariant Eilenberg-Mac Lane Spaces +For each Mackey functor M, there is an Eilenberg-Mac Lane G-spectrum HM +which has the property as Mackey functors +πG +n (HM) = +� +M +n = 0 +0 +n ∈ Z, n ̸= 0 +One can check [13, Chapter XIII, page 162] for the proof of the existence. +Let M be a Mackey functor, the V th space in the Ω-spectrum for HM is called +an equivariant Eilenberg-Mac Lane space of type K(M, V ), which is a classifying +space for the functor HV +G(−; M). That is, given any real orthogonal representations +V , W, there is a G-homotopy equivalence K(M; V ) ≃ ΩW K(M, V +W) satisfying +various compatibility properties. Such spaces are constructed in [9], or one can look +[3] for a construction with a different method. Here, I will give the definition of +them for consistency. +Definition 11. [9] Let V be a real orthogonal representation with |V G| ≥ 1 and +M be a Mackey functor. An equivariant Eilenberg-Mac Lane space K(M, V ) is a +based, (|V ∗|−1)-connected G-space with the G-homotopy type of a G-CW complex +such that πG +V (K(M, V )) = M, and for πG +V +k(K(M, V )) = 0 k ̸= 0. +Remark 12. One can ask what πG +V +nσ(K(M, V )) is for n > 0. Our main interest +is K(Z/2, V ). Then, +πC2 +V +nσ(K(Z/2, V ))(C2/e) = πe +V +nσ(K(Z/2, V )) = 0 +and +πC2 +V +nσ(K(Z/2, V ))(C2/C2) = πC2 +V +nσ(K(Z/2, V )) +∼= ˜HC2 +V +nσ(SV ; Z/2) + +12 +U ˘GUR Y˙I ˘G˙IT +∼= ˜HC2 +nσ(S0,0; Z/2) +∼= HC2 +nσ(∗; Z/2) +So, as a Mackey functor, the homotopy πG +V +nσ(K(M, V )) is one of the +Z/2 +� +0 +Id +� +� +or +0 +� +0 +Id +� +� +depending on the dimension of the representation V and n. +As mentioned before, one can check [9] for existence and some properties of these +spaces. +Another approach to construct equivariant Eilenberg-Mac Lane spaces is Dos +Santos [3] approach. As we know in the classical case, the free abelian group on +the n-sphere is a model for the Eilenberg-Mac Lane space K(Z, n), and the free F2- +vector space on the n-sphere is a model for the Eilenberg-Mac Lane space K(F2, n). +Dos Santos constructed a topological abelian group M ⊗ X in [3, Definition 2.1.], +which is the equivariant generalization of previous sentence for a Mackey functor M, +and proved an RO(G)-graded version of equivariant Dold-Thom theorem proved by +Lima-Filho for Z-graded case in [11]. +Let M be a Z[G]-module, M be the Mackey functor associated to M: the value of +M on G/H is M H and the value on the projection G/K −→ G/H, for K < H < G, +is the inclusion of M H ֒→ M K. We define M ⊗ X as the Z[G]-module with a +topology as follows([3, Definition 2.1.]): Let (X, ∗) be a based G-set, M ⊗X denote +the Z[G]-module � +x∈X−{∗} M. The action of g ∈ G is given by (g.m)x = g.mg−1.x, +where mx denotes the xth coordinate of m ∈ � +x∈X−{∗} M. 
Given (X, ∗) a based +G-space, M ⊗ X can be equivalently defined as the quotient +M ⊗ X = ∐n≥0M n × Xn/ ∽, +where ∽ is the equivalence relation generated by: +(i) (r, φ∗x) ∽ (φ∗r, x), for each based map φ : {0, · · · , n} −→ {0, · · · , m}, +n, m ∈ N, where φ∗x = x ◦ φ, and (φ∗r)i = � +k∈φ−1(i) rk. +(ii) ((r, r′), (x, ∗)) ∽ (r, x), for each r ∈ M n, r′ ∈ M, x ∈ X. +We give the discrete topology to M and endow M ⊗ X with the quotient topology +corresponding to the relation ∽. +We can define Eilenberg-Mac Lane spaces as KV = M ⊗ SV . In our case, +Km+nσ = Z/2 ⊗ Sm+nσ. +Theorem 13. [3] Let X be a based G-CW-complex and let V be a finite dimensional +G-representation, then M ⊗ X is an equivariant infinite loop space and there is a +natural equivalence +πG +V (M ⊗ X) ∼= ˜HG +V (X; M) +As a corollary to this theorem we have that M ⊗ SV is a K(M, V ) space (as +Definition 11). Thus we have a simple model for the equivariant Eilenberg-Mac +Lane spectrum HM. + +COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES +13 +Examples 14. +(i) K(Z/2, 1) is RP ∞, with trivial action. +(ii) Recall that RP ∞ +tw = P(U) is the space of lines in the complete universe +(Definition 1) +U = (Rρ)∞ +[13]. The cohomology of RP ∞ +tw is calculated by Kronholm in [?]. The space +RP ∞ +tw is equivalent to K(Z/2, σ), since it is equivalent to Z/2 ⊗ Sσ. +Theorem 15. [6] H⋆(RP ∞ +tw) ∼= H⋆(pt)[c, d]/(c2 = ac + ud), where deg(c)=σ, and +deg(d)=ρ. +Now, we will give a structure of fixed points of equivariant Eilenberg-Mac Lane +spaces, which is useful to calculate the cohomology of them. +Theorem 16. [2, Corollary 10] +(i) (K(Z/2, rσ + s))e ≃ K(Z/2, r + s). +(ii) (K(Z/2, rσ + s))C2 ≃ K(Z/2, s) × · · · × K(Z/2, r + s). +5. Cohomology of Eilenberg-Mac Lane Spaces +In classical case the cohomology of Eilenberg- Mac Lane spaces Kn with Z/2- +coefficients, which is given by Serre in [18] is a polynomial ring +H∗(Kn; Z/2) = P(SqI(ιn)|e(I) < n) +where I are admissible sequences, ιn is the fundamental class, and e(SqI) = � +j(ij− +2ij+1). We thought that we can give similar description for RO(C2)−graded C2- +equivariant cohomology of C2-equivariant Eilenberg-Mac Lane spaces, but these are +more complicated than we expect. +Let sV,l is the operation that sends x to x2l for x ∈ HV . It is possible to express +sV,l as a linear combination of Steenrod operations. +sV,0 = 1 +If x ∈ Ha+bσ, and b = r1 + ⌊ a+b +2 ⌋, then (u−r1x)2 = Sqa+b +C2 (u−r1x), so +x2 = u2r1Sqa+b +C2 (u−r1x) +By using C2-equivariant Cartan formula and the formula 3.2 +Sq2m+δ +C2 +(u2l+ǫ) = +� 2l + ǫ +2m + δ +� +u2l+ǫ−m−δa2m+δ +one has general formula for Sqa+b +C2 (u−r1x). By iterating this method one can find a +formula for every x2l, so sV,l exist. For example, if x ∈ H3+σ, then +(ux)2 = Sq4 +C2(ux) = +2 +� +r=0 +Sq2r +C2(u)Sq4−2r +C2 +(x) + +1 +� +s=0 +Sq2s+1 +C2 +(u)Sq3−2s +C2 +(x) += uSq4 +C2(x) + uaSq3 +C2(x) +Thus +x2 = u−1Sq4 +C2(x) + u−1aSq3 +C2(x). + +14 +U ˘GUR Y˙I ˘G˙IT +The set of elements xi whose finite distinct products form a basis for a graded +ring A is called a simple system of generators. +For example, a polynomial +algebra k[x] has a simple system of generators +� +x2i| i ≥ 0 +� +. +Theorem 17. (Borel) Let F → E → B be a C2-fibration with E contractible. +Suppose that H⋆(F) has a simple system {xi} of transgressive generators. Then +H⋆(B) is a polynomial ring in the {Σ(xi)}. +E2-page of RO(G)-graded Serre spectral sequence of Kronholm [7] depends only +on the total degree of representations, not the dimension of twisted part. 
The proof +of the theorem is completely same as the classical case. See, for example, [15, Page +88, Theorem 1]. +A simple system of generators for H⋆(Kσ) ∼= H⋆(pt)[c, d]/(c2 = ac + ud) is +� +c, d2l|l ≥ 0 +� += {c, s1+σ,l(d)|l ≥ 0} +By applying the Borel Theorem to the path space fibration +Kσ → P(Kρ) → Kρ +we have +H⋆(Kρ) = P(xρ, sρ,l(x1+ρ)|l ≥ 0). +A simple system of generator for H⋆(Kρ) is +� +x2j +ρ , (sρ,l(x1+ρ))2j|j, l ≥ 0 +� += +� +sρ,j(xρ), s2lρ+1,jsρ,l(x1+ρ)|j, l ≥ 0 +� +where |sρ,l(x1+ρ)| = 2lρ + 1. Then, +H⋆(K1+ρ) = P(sρ,j(x1+ρ), s2lρ+1,jsρ,l(x2+ρ)|j, l ≥ 0). +A simple system of generators for H⋆(K1+ρ) is +� +(sρ,jx1+ρ)2k, (s2jρ+1,jsρ,lx2+ρ)2k|j, l, k ≥ 0 +� += +� +s2jρ+1,ksρ,jx2+σ, s2j(2lρ+1)+1,ks2lρ+1,jsρ,lx2+ρ|j, l, k ≥ 0 +� +where |s2lρ+1,jsρ,lx2+ρ| = 2j(2lρ + 1) + 1. Then, +H⋆(K2+ρ) = P(s2jρ+1,ksρ,jx2+ρ, s2j(2lρ+1)+1,ks2lρ+1,jsρ,lx3+ρ|j, l, k ≥ 0). +Thus, by iterating this process, one can find the RO(C2)-graded cohomology of +C2-equivariant Eilenberg-Mac Lane spaces Kn+σ +H⋆(Kn+σ) +for n ≥ 0. +Conjecture 18. We know that +H∗(K1) = P(x1) +H∗(Kσ) = P(xσ, x1+σ)/(x2 +σ + axσ + ux1+σ). +If we knew H⋆(K1+k(σ−1)) for all k ≥ 0, then we could use the Borel theorem to +find H⋆(K1+m+k(σ−1)) for m ≥ 0. +We conjectured that H⋆(K1+k(σ−1)) is a polynomial algebra on k + 1 generators +with k relations, saying that the square of each of the first k generators is a linear +combination of the other generators. The dimensions of the first k generators of + +COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES +15 +the H⋆(K1+k(σ−1)) are obtained by adding σ − 1 to those of the generators of the +H⋆(K1+(k−1)(σ−1)), and the dimension of the last generator is kσ + 2k − k. +Example 19. Let k = 3. Then +H∗(K3σ−2) = P(x3σ−2, x3σ−1, x3σ+1, x3σ+5)/(x2 +3σ−2+· · · , x2 +3σ−1+· · · , x2 +3σ+1+· · · ) +where the other terms in the relations are linear. The resulting simple system of +generators is +{x3σ−2, x3σ−1, x3σ+1} ∪ {x2i +3σ+5|i ≥ 0}. +Now, we give another useful lemma for computations, which is the cohomology +analogous of the Lemma 2.7. in [1]. +There is a forgetful map +Φe : HV +C2(X; Z/2) −→ H|V |(Xe; Z/2) +from the equivariant cohomology to the non-equivariant cohomology with Z/2- +coefficients. And also, we have a fixed point map +ΦC2 : HV +C2(X; Z/2) −→ H|V C2 |(XΦC2; Z/2) +where XΦC2 is a geometric fixed point of a G-space X. Now, I will state the lemma, +whose proof is the analog of Lemma 2.7. in [1]. +Lemma 20. Let X be a genuine C2-spectrum, and suppose that {bi} is a set of +elements of H⋆(X) such that +(i) {Φe(bi)} is a basis of H∗(Xe), and +(ii) {ΦC2(bi)} is a basis of H∗(XΦC2) +Then H⋆(X) is free over H⋆(pt) with the basis {bi}. +One project is to finish calculations of the RO(C2)−graded C2-equivariant co- +homology of C2-equivariant Eilenberg-Mac Lane spaces by using Caruso theorem +16 and lemma 20, and then Eilenberg-Moore spectral sequences of Michael A. Hill +[4, Chapter 5]. +Conjecture 21. H⋆(Krσ+s) is a polynomial algebra on certain C2-equivariant +Steenrod operations SqI +C2(ιrσ+s) divided by certain powers of u, where e(I) < rσ+s, +and ιrσ+s is the fundamental class, and V < V +′ if and only if V +′ = V + W for +some actual representations W with positive degree. +Example 22. H⋆(K(Z/2, 1 + σ)) is the polynomial algebra generated by elements +SqI(ι1+σ), where I is admissible and e(I) < 1 + σ. So, it is a polynomial algebra +P(Sq0(ι1+σ), Sq2Sq1(ι1+σ), Sq4Sq2Sq1(ι1+σ), · · · ). +Then, it is shortly +P(xρ, x1+2ρ, x1+4ρ, x1+8ρ, · · · ). 
So far, we have calculated the cohomology of K(Z/2, n + σ) for n ≥ 0. To calculate the other cases, if we knew H⋆(K_{nσ}) for n ≥ 2, we could use the Eilenberg-Moore spectral sequence [4, Chapter 5] and the Borel theorem for the path-space fibration
ΩK(Z/2, V) −→ P(K(Z/2, V)) −→ K(Z/2, V).
For example, for the path-space fibration
K(Z/2, σ) −→ P(K(Z/2, 1 + σ)) −→ K(Z/2, 1 + σ),
the E∞-term of the Eilenberg-Moore spectral sequence is
E∞ = E(x_σ, x_ρ, x_{2ρ}, x_{4ρ}, · · · )
with the relations
x_σ^2 = ax_σ + ux_{1+σ}
x_{1+σ}^2 = x_{2+2σ}
x_{2+2σ}^2 = x_{4+4σ}
...
x_{2^iρ}^2 = x_{2^{i+1}ρ}
...
for i ≥ 0. As a result, H⋆(K(Z/2, σ); Z/2) is a quadratic extension of a polynomial algebra, as was already known.

References
[1] Mark Behrens and Dylan Wilson. A C2-equivariant analog of Mahowald's Thom spectrum theorem. Proceedings of the American Mathematical Society, 146, 07 2017.
[2] Jeffrey L. Caruso. Operations in equivariant Z/p-cohomology. Math. Proc. Cambridge Philos. Soc., 126(3):521–541, 1999.
[3] Pedro F. dos Santos. A note on the equivariant Dold-Thom theorem. J. Pure Appl. Algebra, 183(1-3):299–312, 2003.
[4] Michael A. Hill. Freeness and equivariant stable homotopy. Journal of Topology, 15(2):359–397.
[5] Po Hu and Igor Kriz. Real-oriented homotopy theory and an analogue of the Adams-Novikov spectral sequence. Topology, 40(2):317–399, 2001.
[6] William C. Kronholm. A freeness theorem for RO(Z/2)-graded cohomology. Topology Appl., 157(5):902–915, 2010.
[7] William C. Kronholm. The RO(G)-graded Serre spectral sequence. Homology Homotopy Appl., 12(1):75–92, 2010.
[8] G. Lewis, J. P. May, and J. McClure. Ordinary RO(G)-graded cohomology. Bull. Amer. Math. Soc. (N.S.), 4(2):208–212, 1981.
[9] L. Gaunce Lewis, Jr. Equivariant Eilenberg-Mac Lane spaces and the equivariant Seifert-van Kampen and suspension theorems. Topology Appl., 48(1):25–61, 1992.
[10] L. Gaunce Lewis, Jr. The equivariant Hurewicz map. Trans. Amer. Math. Soc., 329(2):433–472, 1992.
[11] P. Lima-Filho. On the equivariant homotopy of free abelian groups on G-spaces and G-spectra. Math. Z., 224(4):567–601, 1997.
[12] Mark Mahowald. The image of J in the EHP sequence. Ann. of Math. (2), 116(1):65–112, 1982.
[13] J. P. May. Equivariant homotopy and cohomology theory, volume 91 of CBMS Regional Conference Series in Mathematics. Published for the Conference Board of the Mathematical Sciences, Washington, DC, 1996. With contributions by M. Cole, G. Comezaña, S. Costenoble, A. D. Elmendorf, J. P. C. Greenlees, L. G. Lewis, Jr., R. J. Piacenza, G. Triantafillou, and S. Waner.
[14] John Milnor. The Steenrod algebra and its dual. Ann. of Math. (2), 67:150–171, 1958.
[15] Robert E. Mosher and Martin C. Tangora. Cohomology operations and applications in homotopy theory. Harper & Row, Publishers, New York-London, 1968.
[16] Nicolas Ricka. Subalgebras of the Z/2-equivariant Steenrod algebra. Homology Homotopy Appl., 17(1):281–305, 2015.
[17] Joël Riou. Opérations de Steenrod motiviques. https://arxiv.org/abs/1207.3121, 2012.
[18] Jean-Pierre Serre. Représentations linéaires des groupes finis. Hermann, Paris, 1967.
[19] Vladimir Voevodsky. Reduced power operations in motivic cohomology. Publ. Math. Inst. Hautes Études Sci., (98):1–57, 2003.
Istanbul Medeniyet University, Istanbul, TURKEY
Email address: ugur.yigit@medeniyet.edu.tr
Current address: Department of Mathematics, Istanbul Medeniyet University, H1-20, Istanbul, TURKEY 34700
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Throughout this paper, H⋆(−) denotes the ordinary RO(C2)-graded cohomology of a C2-space with the constant Mackey functor coefficients Z/2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' To compute the RO(C2)-graded cohomology of the C2-equivariant Eilenberg- Mac Lane spaces with the constant Mackey functor M = Z/2 coefficients, we use Borel theorem 17 for the path-space fibration ΩK(Z/2, V ) −→ P(K(Z/2, V )) −→ K(Z/2, V ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' for V = σ + n, where n ≥ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Key words and phrases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Equivariant Cohomology, Equivariant Steenrod algebra, Equivariant Eilenberg-Mac Lane Spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' 1 2 U ˘GUR Y˙I ˘G˙IT If we knew H⋆(K(Z/2, nσ)) for n ≥ 2, one could use the Eilenberg-Moore spec- tral sequence [4, Chapter 5], the Borel theorem, and the RO(G)-graded Serre spec- tral sequence of Kronholm [7, Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='] for the path-space fibration ΩK(Z/2, V ) −→ P(K(Z/2, V )) −→ K(Z/2, V ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' This paper is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' In section 2, we provide the basic equivariant topology tools, and C2-equivariant cohomology M C2 2 of a point, and equivariant connectivity of G-spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' In section 3, we descripe equivariant Steenrod squares, C2-equivariant Steenrod algebra AC2 and axioms of it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' In section 4, we give the definition of the equivariant Eilenberg-Mac Lane spaces with some properties, and the fixed point sets of the equivariant Eilenberg-Mac Lane spaces that is very useful to compute the cohomology of them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' In section 5, we compute the RO(C2)-graded C2-equivariant cohomology of some C2-equivariant Eilenberg-Mac Lane spaces KV for real orthogonal representations V = σ+n, n ≥ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Also, we give some conjectures and future directions for the other cases.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Notation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' We provide here notation used in this paper for convenience.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' V = rσ+s, a real orthogonal representation of C2, which is a sum of r-copy of the sign representation σ and s-copy of the trivial representation 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' ρ = σ + 1, the regular representation of C2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' RO(C2), the real representation ring of C2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' SV , the equivariant sphere which is the one-point compactification of V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' πC2 V (X), the V -th C2-equivariant homotopy group of a topological C2-space X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' πS rσ+s, the C2-equivariant stable homotopy groups of spheres.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Σσ(X), the σ-th suspension of X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Ωσ(X), all continuous functions from Sσ to X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' H⋆ G(− : M), RO(G)-graded ordinary equivariant cohomology with Mackey functor M coefficients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' M C2 2 , RO(C2)-graded C2-equivariant cohomology of a point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' AC2, C2-equivariant Steenrod algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' K(M, V ) or shortly KV , the V th equivariant Eilenberg-Mac Lane space with a Mackey functor M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' πG V (X), C2-equivariant homotopy of a G-space X as a Mackey functor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Sqk C2, C2-equivariant Steenrod squaring operations for k ≥ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' RP ∞ tw, the space of lines in the complete universe U = (Rρ)∞, which is equivalent to K(Z/2, σ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Acknowledgements.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' I would like to thank Michael A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Hill for valuable conversa- tions and providing me some suggestions for calculations, and William Kronholm for producing the action of Steenrod squares on the cohomology ring of RP ∞ tw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Lastly and most importanly, I would like to state my gratitude to my advisor, Douglas C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Ravenel, for his patience, support, and encouragement throughout my graduate studies, and numerous beneficial conversations and suggestions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The work in this paper was part of the author’s dissertation while at the University of Rochester.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Preliminaries In this section we give the main tools that are used in the rest of the article.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Let X be a G-space, where G = C2 is a cyclic group with generator γ such that γ2 = e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The group C2 has two irreducible real representations, namely the trivial COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES 3 representation denoted by 1 (or R) and the sign representation denoted by σ (or R−).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The regular representation is isomorphic to ρC2 = 1 + σ (it is denoted by ρ if there is no confusion).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Thus the representation ring RO(C2) is free abelian of rank 2, so every representation V can be expressed as V = rσ + s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Definition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A G-universe is a countably infinite-dimensional G-representation which contains the trivial G-representation and which contains infinitely many copies of each of its finite-dimensional subrepresentations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Also, a complete G-universe is just a G-universe that contains infinitely many copies of every irre- ducible G-representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Definition 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A G-spectrum E on a G-universe U is a collection EV of based G-spaces together with basepoint-preserving G-maps σV,W : ΣW−V EV −→ EW whenever V ⊂ W ⊂ U, where W − V denotes the orthogonal complement of V in W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' It is required that σV,V is identity, and the commutativity of the diagram ΣW−V ΣV −UEU ΣW−V EV EW ΣW −V σU,V σU,W σV,W for U ⊂ V ⊂ W ⊂ U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Definition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' If the adjoint structure maps ˜σV,W : EV −→ ΩW−V EW are weak homotopy equivalences for V ⊂ W ⊂ U, then a G-spectrum is called G − Ω-spectrum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A G-spectrum indexed on a complete(trivial) G-universe is called genuine(naive).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' For an actual representation V of G and a G-space X, the V -th homotopy group of X is the Mackey functor πV (X) determined by πV (X)(G/H) = [SV , X]H for every H < G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' For a virtual representation V ∈ RO(G) and a G-spectrum E, the V -th homo- topy group of E is the Mackey functor πV (E) determined by πV (E)(G/H) = colimnπ0(ΩV +WnEWn)H where {Wn|n ∈ N} is an increasing sequence of representations · · ⊂ Wn ⊂ Wn+1 ⊂ · · · such that any finite dimensional representation V of G admits an equivariant em- bedding in some Wn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Lewis, May and Mcclure [8] defined an ordinary cohomology theory H⋆ G(− : M) on G-spaces with Mackey functor M coefficients and the graded by real orthogonal representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' 4 U ˘GUR Y˙I ˘G˙IT Throughout this paper, the Mackey functor will typically be the constant Mackey functor M = Z/2, which can be given the following diagram in Lewis notation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='1) Z/2 Id � Z/2 Id � 0 � The ordinary equivariant cohomology M C2 2 of a point with this coefficient is given in the Figure 1 below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Every • in the figure represents a copy of Z/2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' As you see in the Figure 1 below, there are two elements of interest.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The inclusion map of the fixed point set (the north and south poles) a : S0 −→ Sσ defines an element in πC2 −σ(S−0), and we will use the same symbol for its mod 2 Hurewicz image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' It is called an Euler class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' One can show that HC2 1 (Sσ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Z/2) = HC2 1−σ(S−0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Z/2) = Z/2 and we denote its generator by u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Dually, we have a ∈ Hσ C2(S−0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Z/2) and u ∈ Hσ−1 C2 (S−0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Z/2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' These are the analog of elements ρ and τ in real motivic homotopy theory, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' −6 −4 −2 0 2 4 6 −6 −4 −2 0 2 4 6 1 u u2 a ua a2 θau θ au2 θ a2u Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The equivariant cohomology M C2 2 of a point The coordinate (x, y) represents degree (x − y) + σy, which is convenient with the motivic bidegree.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Red and blue lines represent multiplication by u and a, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Now, we will give the definition of equivariant connectivity of G-spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES 5 Definition 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' [10] (i) A function ν∗ from the set of conjugacy classes of subgroups of G to the integers is called a dimension function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The value of ν∗ on the conju- gacy class of K ⊂ G is denoted by νK.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Let ν∗ and µ∗ be two dimension functions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' If νK ≥ µK for every subgroup K, then ν∗ ≥ µ∗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Associated to any G-representation V is the dimension function |V ∗| whose value at K is the real dimension of the K-fixed subspace V K of V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The dimension function with constant integer value n is denoted n∗ for any integer n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (ii) Let ν∗ be a non-negative dimension function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' If for each subgroup K of G, the fixed point space Y K is νK-connected, then a G-space Y is called G-ν∗-connected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' If A G-space Y is G-0∗-connected, then it is called G-connected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Also, if it is G-1∗-connected, it is called simply G-connected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A G-space Y is homologically G-ν∗-connected if, for every subgroup K of G and every integer m with 0 ≤ m ≤ νK , the homol- ogy group HK m(Y ) is zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (iii) Let ν∗ be a non-negative dimension function and let f : Y −→ Z be a G-map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' If, for every subgroup K of G, (f K)∗ : πm(Y k) −→ πm(ZK) is an isomorphism for every integer m with 0 ≤ m < νK and an epi- morphism for m = νK, then f is called G-ν∗-equivalence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A G-pair (Y, B) is said to be G-ν∗-connected if the inclusion of B into Y is a G-ν∗-equivalence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The notions of homology G-ν∗-equivalence and of homology G-ν∗-connectedness for pairs are defined similarly, but with homotopy groups replaced by homology groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (iv) Let V be a G-representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' For each subgroup K of G, let V (K) be the orthogonal complement of V K ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' then V (K) is a K-representation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' If πK V (K)+m(Y ) is zero for each subgroup K of G and each integer m with 0 ≤ m ≤ |V K|, the G-space Y is called G-V -connected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Similarly, if HG V (K)+m(Y ) is zero for each subgroup K of G and each integer m with 0 ≤ m ≤ |V K|, then the G-space Y is called homologically G- V -connected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (v) Let V be a G-representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A G-0∗-equivalence f : Y −→ Z is said to be a G-V -equivalence if, for every subgroup K of G, the map f∗ : πK V (K)+m(Y ) −→ πK V (K)+m(Z) is an isomorphism for every integer m with 0 ≤ m < |V K| and an epimor- phism for m = |V K|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A homology G-V -equivalence is defined similarly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' A G-pair (Y, B) is called G-V -connected (respectively, homologically G-V -connected) if the inclusion of B into Y is a G-V -equivalence (re- spectively, homology G-V -equivalence).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' 6 U ˘GUR Y˙I ˘G˙IT 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' C2-Equivariant Steenrod Algebra The analog of the mod 2 Steenrod algebra is defined by Voevodsky [19] in the motivic case, and Po Hu and Igor Kriz [5] in the equivariant case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The two descrip- tions are essentially the same.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' One has squaring operations Sqk C2 for k ≥ 0, whose degrees |Sqk C2| = � i(1 + σ) for k = 2i i(1 + σ) + 1 for k = 2i + 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Sq0 C2 = 1 as in the classical case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The C2-equivariant Steenrod algebra acts on the coefficient ring M C2 2 by (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='1) Sqk C2(u) = \uf8f1 \uf8f2 \uf8f3 u for k = 0 a for k = 1 0 else.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='2) Sq2m+δ C2 (u2l+ǫ) = � 2l + ǫ 2m + δ � u2l+ǫ−m−δa2m+δ The difficulty in deriving the formula 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='2 is the C2-equivariant Cartan formula 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='7, 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Since |Sq2m+δ C2 | = m(1 + σ) + δ for 0 ≤ δ ≤ 1, we have (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='3)\uf8f1 \uf8f2 \uf8f3 ∆(Sq2m+1 C2 ) = � 0≤i≤2m+1 Sqi C2 ⊗ Sq2m+1−i C2 ∆(Sq2m C2 ) = � 0≤j≤m Sq2i C2 ⊗ Sq2m−2j C2 + u � 1≤j≤m Sq2j−1 C2 ⊗ Sq2m−2j+1 C2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The terms divisible by u make things difficult.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Here we are using cohomological degree, so |u| = σ − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Note that |u−mSq2m+δ C2 | = m(1 − σ) + m(1 + σ) + δ = 2m + δ and define Sq2m+δ := u−mSq2m+δ C2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' We will see that these operations satisfy the classical Cartan formula.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' We have ∆(Sq2m+1) = u−m∆(Sq2m+1 C2 ) = u−m � 0≤i≤2m+1 Sqi C2 ⊗ Sq2m+1−i C2 = � 0≤i≤2m+1 u−⌊i/2⌋Sqi C2 ⊗ u−⌊(2m+1−i)/2⌋Sq2m+1−i C2 = � 0≤i≤2m+1 Sqi C2 ⊗ Sq2m+1−i C2 since ⌊i/2⌋ + ⌊(2m + 1 − i)/2⌋ = m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' And also, ∆(Sq2m) = u−m∆(Sq2m C2 ) COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES 7 = u−m � 0≤j≤m Sq2j C2 ⊗ Sq2m−2j C2 + u1−m � 1≤j≤m Sq2j−1 C2 ⊗ Sq2m−2j+1 C2 = � 0≤j≤m u−jSq2j C2 ⊗ uj−mSq2m−2j C2 + � 1≤j≤m u1−jSq2j−1 C2 ⊗ uj−mSq2m−2j+1 C2 = � 0≤j≤m Sq2j C2 ⊗ Sq2m−2j C2 + � 1≤j≤m Sq2j−1 C2 ⊗ Sq2m−2j+1 C2 = � 0≤i≤2m Sqi C2 ⊗ Sq2m−i C2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Now, if we use homological degree, then |Sqm| = −m, |a| = −σ, and |u| = 1 − σ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' We know that (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='4) Sqm C2(u) = \uf8f1 \uf8f2 \uf8f3 u for m = 0 a for m = 1 0 else.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Consider the total Steenrod operation (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='5) Sqt = � i≥0 tiSqi, where t is a dummy variable.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Although this sum is infinite, it yields a finite sum when applied to any monomial in a and u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The classical Cartan formula satisfied by operations Sqi implies that it is a ring homomorphism, meaning that Sqt(xy) = Sqt(x)Sqt(y).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Then 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='4 implies that Sqt(u) = u + ta Sqt(ul) = (u + ta)l = � 0≤m≤l � l m � tmul−mam = � 0≤m≤l tmSqm(ul).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Hence, Sqm(ul) is the coefficient of tm in the first sum above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' It follows that Sq2m+δ(u2l+ǫ) = � 2l + ǫ 2m + δ � u2l+ǫ−2m−δa2m+δ Sq2m+δ(u2l+ǫ) = umSq2m+δ(u2l+ǫ) = � 2l + ǫ 2m + δ � u2l+ǫ−m−δa2m+δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' As a result, we have the following: Lemma 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Sq2m+δ C2 (u2l+ǫ) = � 2l + ǫ 2m + δ � u2l+ǫ−m−δa2m+δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' 8 U ˘GUR Y˙I ˘G˙IT The natural action of the Steenrod algebra in homology is on the right, not on the left.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Classically, the mod p cohomology of a space or a spectrum X is a left module over the Steenrod algebra A, so there is a map cX : A ⊗ H∗X → H∗X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The Steenrod algebra has a multiplication φ∗ : A ⊗ A → A (the symbol φ∗ and its dual φ∗ are taken from Milnor’s paper [14]) and the following diagram commutes (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='6) A ⊗ A ⊗ H∗X A ⊗ H∗X A ⊗ H∗X H∗X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' φ∗⊗H∗X A⊗cX cX cX Milnor defines a right action of A on H∗X by the rule ⟨xa, y⟩ = ⟨x, ay⟩ ∈ Fp for x ∈ H∗X, a ∈ A and y ∈ H∗X, where the brackets denotes the evaluation of the cohomology class on the right on the homology class on the left.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Milnor denotes by λ∗ the resulting map H∗X ⊗ A → H∗X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The same thing happens in the C2-equivariant case.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' For example, we have (u2)Sq3 C2 = (u2)Sq1 C2Sq2 C2 = 0 because (u2)Sq1 C2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' And, (u2)χ(Sq3 C2) = (u2)Sq2 C2Sq1 C2 = (ua2)Sq1 C2 = a3, where χ(−) means the conjugate Steenrod operations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Hence, 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='2 should really read as (u2l+ǫ)Sq2m+δ C2 = � 2l + ǫ 2m + δ � u2l+ǫ−m−δa2m+δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' For example, Sql C2(u−1) = �−1 l � alu−1−l = \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 �−1 0 � u−1 = u−1 for l = 0 �−1 1 � au−2 = au−2 for l = 1 �−1 2 � a2u−3 = a2u−3 for l = 2 0 for l ≥ 3 Action on the other elements is determined by the Cartan formula (iv) given below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' We now give axioms for the squares Sqk C2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' For the motivic case, you can check Voevodsky paper [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' But, the Adem relation is fixed by Jo¨el Riou in [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (i) Sq0 C2 = 1 and Sq1 C2 = βC2, Bockstein homomorphism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (ii) βSq2k C2 = Sq2k+1 C2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' COHOMOLOGY OF EQUIVARIANT EILENBERG-MAC LANE SPACES 9 (iii) βSq2k+1 C2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (iv) (Cartan formula) (3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='7) Sq2k C2(xy) = k � r=0 Sq2r C2(x)Sq2k−2r C2 (y) + u k−1 � s=0 Sq2s+1 C2 (x)Sq2k−2s−1 C2 (y) (3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='8) Sq2k+1 C2 (xy) = 2k+1 � r=0 Sqr C2(x)Sq2k+1−r C2 (y) + a k−1 � s=0 Sq2s+1 C2 (x)Sq2k−2s−1 C2 (y) (v) (Adem relation) If 0 < i < 2j,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' then when i + j is even Sqi C2Sqj C2 = [i/2] � k=0 �b − 1 − k i − 2k � uǫSqi+j−k C2 Sqk C2 where ǫ = � 1 for k is odd and i and j are even 0 else when i + j is odd Sqi C2Sqj C2 = [i/2] � k=0 �j − 1 − k i − 2k � Sqi+j−k C2 Sqk C2 + a � k=odd ε Sqi+j−k C2 Sqk C2 where ε = � �j−1−k i−2k � for i is odd � j−1−k i−2k−1 � for j is odd (vi) If x has a degree kσ + k,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' then Sq2k C2(x) = x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' (vii) (instability) If x has a degree V , V < kσ + k then Sq2k C2(x) = 0, where V < V ′ if and only if V ′ = V + W for some actual representations W with positive degree.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Note that setting u = 1 and a = 0 reduces the Cartan formula (iv) to the classical Cartan formula, and Adem relation (v) to the classical Adem relation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Examples 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' We have ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sq1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2Sqn ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 = ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sqn+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n is even ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n is odd ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sq2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2Sqn ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 = ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sqn+2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='+ uSqn+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 0 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sqn+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 1 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='uSqn+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 2 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sqn+2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='+ Sqn+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 3 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='and ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sq3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2Sqn ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 = ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='\uf8f3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sqn+3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='+ aSqn+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 0 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sqn+2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 1 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='aSqn+1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 2 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Sqn+2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 Sq1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='C2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='for n ≡ 3 mod 4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='U ˘GUR Y˙I ˘G˙IT ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content='Now,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' let SqI C2 denote Sqi1 C2Sqi2 C2 · · · Sqin C2 for a sequence of integers I = (i1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' · · · ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' in).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The sequence I is said to be admissible if is ≥ 2is+1 for all s ≥ 1, where is+1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The operations SqI C2 with admissible I are called admissible monomials.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' We also call Sq0 C2 admissible, where Sq0 C2 = SqI C2 for empty I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Lemma 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The admissible monomials form a basis for the C2-equivariant Steenrod algebra AC2 as a H⋆(pt)-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' The proof follows from the Adem relations and the Cartan formula as in the classical case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PtE3T4oBgHgl3EQfxwuM/content/2301.04714v1.pdf'} +page_content=' □ For the graded AC2-module structure and Hopf algebra structure of equivariant Steenrod algebra, one can look [16].' 
We will now give the unstable module structure of it.
Definition 8. An A_{C_2}-module is unstable if it satisfies the preceding instability condition (vii). We define the excess of Sq^k_{C_2} to be the degree of Sq^k_{C_2}:
e(Sq^k_{C_2}) = iρ for k = 2i, and e(Sq^k_{C_2}) = iρ + 1 for k = 2i + 1.
So e(Sq^k_{C_2}) = |Sq^k_{C_2}|. Then the excess of Sq^I_{C_2} = Sq^{i_1}_{C_2} Sq^{i_2}_{C_2} · · · Sq^{i_k}_{C_2} is defined to be
e(Sq^I_{C_2}) = Σ_j ( e(Sq^{i_j}_{C_2}) − ρ e(Sq^{i_{j+1}}_{C_2}) ), where ρ(rσ + s) = (r + s)ρ.
Examples 9. The monomial with e(Sq^I_{C_2}) = 0 is Sq^0_{C_2}. The monomials with e(Sq^I_{C_2}) = 1 are Sq^1_{C_2}, Sq^2_{C_2}Sq^1_{C_2}, Sq^4_{C_2}Sq^2_{C_2}Sq^1_{C_2}, · · · There is no monomial with e(Sq^I_{C_2}) = σ. The monomials with e(Sq^I_{C_2}) = 2 are Sq^3_{C_2}Sq^1_{C_2}, Sq^6_{C_2}Sq^3_{C_2}Sq^1_{C_2}, Sq^{12}_{C_2}Sq^6_{C_2}Sq^3_{C_2}Sq^1_{C_2}, · · · The monomials with e(Sq^I_{C_2}) = ρ are Sq^2_{C_2}, Sq^4_{C_2}Sq^2_{C_2}, Sq^8_{C_2}Sq^4_{C_2}Sq^2_{C_2}, · · · There is no monomial with e(Sq^I_{C_2}) = 2σ. The monomials with e(Sq^I_{C_2}) = 3 are Sq^7_{C_2}Sq^3_{C_2}Sq^1_{C_2}, Sq^{11}_{C_2}Sq^5_{C_2}Sq^2_{C_2}Sq^1_{C_2}, · · · The monomials with e(Sq^I_{C_2}) = 2 + σ are Sq^3_{C_2}, Sq^4_{C_2}Sq^1_{C_2}, Sq^5_{C_2}Sq^2_{C_2}, Sq^6_{C_2}Sq^3_{C_2}, Sq^6_{C_2}Sq^2_{C_2}Sq^1_{C_2}, Sq^8_{C_2}Sq^4_{C_2}Sq^1_{C_2}, · · · There is no monomial with e(Sq^I_{C_2}) = 1 + 2σ.
Remark 10. There is no monomial with e(Sq^I_{C_2}) = rσ + s if r > s. Let t_{j,k} = Sq^{j·2^{k−1}}_{C_2} · · · Sq^j_{C_2}. Then the set of elements with total excess 1 is {t_{1,k_1} | k_1 > 0}. The set of elements with total excess 2 is {t_{1+2^{k_1}, k_2+1} t_{1,k_1} | k_1, k_2 ≥ 0}. The set of elements with total excess 3 is {t_{1+2^{k_2}+2^{k_1+k_2}, k_3+1} t_{1+2^{k_1}, k_2} t_{1,k_1} | k_1, k_2, k_3 ≥ 0}.
The C_2-equivariant mod 2 dual Steenrod algebra (one can check [16], or [5] for details) is
A_{C_2} = M^{C_2}_2 [τ_i, ξ_i] / (τ_i^2 + aτ_{i+1} + η_R(u)ξ_{i+1})
such that
η_R(u) = u + aτ_0, η_R(a) = a, |ξ_i| = (2^i − 1)ρ, |τ_i| = 1 + |ξ_i|,
Δ(ξ_i) = Σ_{j=0}^{i} ξ_{i−j}^{2^j} ⊗ ξ_j, where ξ_0 = 1, and
Δ(τ_i) = τ_i ⊗ 1 + Σ_{j=0}^{i} ξ_{i−j}^{2^j} ⊗ τ_j.
4. Equivariant Eilenberg-Mac Lane Spaces
For each Mackey functor M, there is an Eilenberg-Mac Lane G-spectrum HM which has the property, as Mackey functors,
π^G_n(HM) = M for n = 0, and π^G_n(HM) = 0 for n ∈ Z, n ≠ 0.
One can check [13, Chapter XIII, page 162] for the proof of the existence. Let M be a Mackey functor; the V-th space in the Ω-spectrum for HM is called an equivariant Eilenberg-Mac Lane space of type K(M, V), which is a classifying space for the functor H^V_G(−; M). That is, given any real orthogonal representations V, W, there is a G-homotopy equivalence K(M, V) ≃ Ω^W K(M, V + W) satisfying various compatibility properties. Such spaces are constructed in [9], or one can look at [3] for a construction with a different method. Here, I will give the definition of them for consistency.
Definition 11. [9] Let V be a real orthogonal representation with |V^G| ≥ 1 and M be a Mackey functor. An equivariant Eilenberg-Mac Lane space K(M, V) is a based, (|V^∗| − 1)-connected G-space with the G-homotopy type of a G-CW complex such that π^G_V(K(M, V)) = M, and π^G_{V+k}(K(M, V)) = 0 for k ≠ 0.
Remark 12. One can ask what π^G_{V+nσ}(K(M, V)) is for n > 0. Our main interest is K(Z/2, V). Then
π^{C_2}_{V+nσ}(K(Z/2, V))(C_2/e) = π^e_{V+nσ}(K(Z/2, V)) = 0
and
π^{C_2}_{V+nσ}(K(Z/2, V))(C_2/C_2) = π^{C_2}_{V+nσ}(K(Z/2, V)) ≅ H̃^{C_2}_{V+nσ}(S^V; Z/2) ≅ H̃^{C_2}_{nσ}(S^{0,0}; Z/2) ≅ H^{C_2}_{nσ}(∗; Z/2).
So, as a Mackey functor, the homotopy π^G_{V+nσ}(K(M, V)) is either the Mackey functor with value Z/2 at C_2/C_2 and 0 at C_2/e, or the zero Mackey functor, depending on the dimension of the representation V and on n. As mentioned before, one can check [9] for the existence and some properties of these spaces.
Another approach to construct equivariant Eilenberg-Mac Lane spaces is Dos Santos' approach [3]. As we know, in the classical case the free abelian group on the n-sphere is a model for the Eilenberg-Mac Lane space K(Z, n), and the free F_2-vector space on the n-sphere is a model for the Eilenberg-Mac Lane space K(F_2, n). Dos Santos constructed a topological abelian group M ⊗ X in [3, Definition 2.1], which is the equivariant generalization of the previous sentence for a Mackey functor M, and proved an RO(G)-graded version of the equivariant Dold-Thom theorem proved by Lima-Filho for the Z-graded case in [11]. Let M be a Z[G]-module and M̄ be the Mackey functor associated to M: the value of M̄ on G/H is M^H, and the value on the projection G/K → G/H, for K < H < G, is the inclusion M^H ↪ M^K. We define M ⊗ X as the Z[G]-module with a topology as follows ([3, Definition 2.1]): Let (X, ∗) be a based G-set, and let M ⊗ X denote the Z[G]-module ⊕_{x ∈ X−{∗}} M. The action of g ∈ G is given by (g.m)_x = g.m_{g^{−1}.x}, where m_x denotes the x-th coordinate of m ∈ ⊕_{x ∈ X−{∗}} M. Given a based G-space (X, ∗), M ⊗ X can be equivalently defined as the quotient
M ⊗ X = ∐_{n≥0} M^n × X^n / ∼,
where ∼ is the equivalence relation generated by:
(i) (r, φ∗x) ∼ (φ∗r, x), for each based map φ : {0, · · · , n} → {0, · · · , m}, n, m ∈ N, where φ∗x = x ◦ φ and (φ∗r)_i = Σ_{k ∈ φ^{−1}(i)} r_k.
(ii) ((r, r′), (x, ∗)) ∼ (r, x), for each r ∈ M^n, r′ ∈ M, x ∈ X^n.
We give the discrete topology to M and endow M ⊗ X with the quotient topology corresponding to the relation ∼. We can define Eilenberg-Mac Lane spaces as K_V = M ⊗ S^V. In our case, K_{m+nσ} = Z/2 ⊗ S^{m+nσ}.
Theorem 13. [3] Let X be a based G-CW-complex and let V be a finite dimensional G-representation. Then M ⊗ X is an equivariant infinite loop space and there is a natural equivalence
π^G_V(M ⊗ X) ≅ H̃^G_V(X; M).
As a corollary to this theorem we have that M ⊗ S^V is a K(M, V) space (as in Definition 11). Thus we have a simple model for the equivariant Eilenberg-Mac Lane spectrum HM.
Examples 14. (i) K(Z/2, 1) is RP^∞, with trivial action. (ii) Recall that RP^∞_tw = P(U) is the space of lines in the complete universe (Definition 1) U = (R^ρ)^∞ [13]. The cohomology of RP^∞_tw is calculated by Kronholm in [?]. The space RP^∞_tw is equivalent to K(Z/2, σ), since it is equivalent to Z/2 ⊗ S^σ.
Theorem 15. [6] H^⋆(RP^∞_tw) ≅ H^⋆(pt)[c, d]/(c^2 = ac + ud), where deg(c) = σ and deg(d) = ρ.
Now we will give a structure of the fixed points of equivariant Eilenberg-Mac Lane spaces, which is useful to calculate their cohomology.
Theorem 16. [2, Corollary 10] (i) (K(Z/2, rσ + s))^e ≃ K(Z/2, r + s). (ii) (K(Z/2, rσ + s))^{C_2} ≃ K(Z/2, s) × · · · × K(Z/2, r + s).
5. Cohomology of Eilenberg-Mac Lane Spaces
In the classical case, the cohomology of the Eilenberg-Mac Lane spaces K_n with Z/2-coefficients, given by Serre in [18], is a polynomial ring
H^*(K_n; Z/2) = P(Sq^I(ι_n) | e(I) < n),
where the I are admissible sequences, ι_n is the fundamental class, and e(Sq^I) = Σ_j (i_j − 2i_{j+1}). We thought that we could give a similar description for the RO(C_2)-graded C_2-equivariant cohomology of C_2-equivariant Eilenberg-Mac Lane spaces, but these are more complicated than we expected. Let s_{V,l} be the operation that sends x to x^{2^l} for x ∈ H^V. It is possible to express s_{V,l} as a linear combination of Steenrod operations:
s_{V,0} = 1. If x ∈ H^{a+bσ} and b = r_1 + ⌊(a+b)/2⌋, then (u^{−r_1}x)^2 = Sq^{a+b}_{C_2}(u^{−r_1}x), so x^2 = u^{2r_1} Sq^{a+b}_{C_2}(u^{−r_1}x).
By using the C_2-equivariant Cartan formula and the formula 3.2,
Sq^{2m+δ}_{C_2}(u^{2l+ǫ}) = ( (2l + ǫ) choose (2m + δ) ) u^{2l+ǫ−m−δ} a^{2m+δ},
one has a general formula for Sq^{a+b}_{C_2}(u^{−r_1}x). By iterating this method one can find a formula for every x^{2^l}, so s_{V,l} exists. For example, if x ∈ H^{3+σ}, then
(ux)^2 = Sq^4_{C_2}(ux) = Σ_{r=0}^{2} Sq^{2r}_{C_2}(u) Sq^{4−2r}_{C_2}(x) + Σ_{s=0}^{1} Sq^{2s+1}_{C_2}(u) Sq^{3−2s}_{C_2}(x) = u Sq^4_{C_2}(x) + ua Sq^3_{C_2}(x).
Thus x^2 = u^{−1} Sq^4_{C_2}(x) + u^{−1} a Sq^3_{C_2}(x).
The set of elements x_i whose finite distinct products form a basis for a graded ring A is called a simple system of generators. For example, a polynomial algebra k[x] has a simple system of generators {x^{2^i} | i ≥ 0}.
Theorem 17. (Borel) Let F → E → B be a C_2-fibration with E contractible. Suppose that H^⋆(F) has a simple system {x_i} of transgressive generators. Then H^⋆(B) is a polynomial ring in the {Σ(x_i)}.
The E_2-page of the RO(G)-graded Serre spectral sequence of Kronholm [7] depends only on the total degree of representations, not on the dimension of the twisted part. The proof of the theorem is completely the same as in the classical case. See, for example, [15, Page 88, Theorem 1]. A simple system of generators for H^⋆(K_σ) ≅ H^⋆(pt)[c, d]/(c^2 = ac + ud) is
{c, d^{2^l} | l ≥ 0} = {c, s_{1+σ,l}(d) | l ≥ 0}.
By applying the Borel theorem to the path space fibration K_σ → P(K_ρ) → K_ρ we have H^⋆(K_ρ) = P(x_ρ, s_{ρ,l}(x_{1+ρ}) | l ≥ 0). A simple system of generators for H^⋆(K_ρ) is
{x_ρ^{2^j}, (s_{ρ,l}(x_{1+ρ}))^{2^j} | j, l ≥ 0} = {s_{ρ,j}(x_ρ), s_{2^lρ+1,j} s_{ρ,l}(x_{1+ρ}) | j, l ≥ 0},
where |s_{ρ,l}(x_{1+ρ})| = 2^lρ + 1. Then H^⋆(K_{1+ρ}) = P(s_{ρ,j}(x_{1+ρ}), s_{2^lρ+1,j} s_{ρ,l}(x_{2+ρ}) | j, l ≥ 0). A simple system of generators for H^⋆(K_{1+ρ}) is
{(s_{ρ,j} x_{1+ρ})^{2^k}, (s_{2^jρ+1,j} s_{ρ,l} x_{2+ρ})^{2^k} | j, l, k ≥ 0} = {s_{2^jρ+1,k} s_{ρ,j} x_{2+σ}, s_{2^j(2^lρ+1)+1,k} s_{2^lρ+1,j} s_{ρ,l} x_{2+ρ} | j, l, k ≥ 0},
where |s_{2^lρ+1,j} s_{ρ,l} x_{2+ρ}| = 2^j(2^lρ + 1) + 1. Then
H^⋆(K_{2+ρ}) = P(s_{2^jρ+1,k} s_{ρ,j} x_{2+ρ}, s_{2^j(2^lρ+1)+1,k} s_{2^lρ+1,j} s_{ρ,l} x_{3+ρ} | j, l, k ≥ 0).
Thus, by iterating this process, one can find the RO(C_2)-graded cohomology H^⋆(K_{n+σ}) of the C_2-equivariant Eilenberg-Mac Lane spaces K_{n+σ} for n ≥ 0.
Conjecture 18. We know that H^*(K_1) = P(x_1) and H^*(K_σ) = P(x_σ, x_{1+σ})/(x_σ^2 + ax_σ + ux_{1+σ}). If we knew H^⋆(K_{1+k(σ−1)}) for all k ≥ 0, then we could use the Borel theorem to find H^⋆(K_{1+m+k(σ−1)}) for m ≥ 0. We conjectured that H^⋆(K_{1+k(σ−1)}) is a polynomial algebra on k + 1 generators with k relations, saying that the square of each of the first k generators is a linear combination of the other generators. The dimensions of the first k generators of H^⋆(K_{1+k(σ−1)}) are obtained by adding σ − 1 to those of the generators of H^⋆(K_{1+(k−1)(σ−1)}), and the dimension of the last generator is kσ + 2^k − k.
Example 19. Let k = 3. Then H^*(K_{3σ−2}) = P(x_{3σ−2}, x_{3σ−1}, x_{3σ+1}, x_{3σ+5})/(x^2_{3σ−2} + · · · , x^2_{3σ−1} + · · · , x^2_{3σ+1} + · · · ), where the other terms in the relations are linear. The resulting simple system of generators is {x_{3σ−2}, x_{3σ−1}, x_{3σ+1}} ∪ {x^{2^i}_{3σ+5} | i ≥ 0}.
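To see how the degrees in Example 19 follow from the rule stated in Conjecture 18, one can iterate the rule starting from H^⋆(K_σ); the short bookkeeping is:
k = 1: generators of H^⋆(K_{σ}) in degrees σ and 1 + σ (the last degree is 1·σ + 2^1 − 1).
k = 2: adding σ − 1 gives degrees 2σ − 1 and 2σ, and the new last generator sits in degree 2σ + 2^2 − 2 = 2σ + 2.
k = 3: adding σ − 1 again gives 3σ − 2, 3σ − 1, and 3σ + 1, and the new last generator sits in degree 3σ + 2^3 − 3 = 3σ + 5,
which are exactly the generators x_{3σ−2}, x_{3σ−1}, x_{3σ+1}, x_{3σ+5} of Example 19.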
Now we give another useful lemma for computations, which is the cohomology analogue of Lemma 2.7 in [1]. There is a forgetful map
Φ^e : H^V_{C_2}(X; Z/2) → H^{|V|}(X^e; Z/2)
from the equivariant cohomology to the non-equivariant cohomology with Z/2-coefficients. We also have a fixed point map
Φ^{C_2} : H^V_{C_2}(X; Z/2) → H^{|V^{C_2}|}(X^{ΦC_2}; Z/2),
where X^{ΦC_2} is the geometric fixed points of the G-space X. Now I will state the lemma, whose proof is the analogue of that of Lemma 2.7 in [1].
Lemma 20. Let X be a genuine C_2-spectrum, and suppose that {b_i} is a set of elements of H^⋆(X) such that (i) {Φ^e(b_i)} is a basis of H^*(X^e), and (ii) {Φ^{C_2}(b_i)} is a basis of H^*(X^{ΦC_2}). Then H^⋆(X) is free over H^⋆(pt) with the basis {b_i}.
One project is to finish the calculations of the RO(C_2)-graded C_2-equivariant cohomology of C_2-equivariant Eilenberg-Mac Lane spaces by using Caruso's theorem 16 and Lemma 20, and then the Eilenberg-Moore spectral sequences of Michael A. Hill [4, Chapter 5].
Conjecture 21. H^⋆(K_{rσ+s}) is a polynomial algebra on certain C_2-equivariant Steenrod operations Sq^I_{C_2}(ι_{rσ+s}) divided by certain powers of u, where e(I) < rσ + s, ι_{rσ+s} is the fundamental class, and V < V′ if and only if V′ = V + W for some actual representation W with positive degree.
Example 22. H^⋆(K(Z/2, 1 + σ)) is the polynomial algebra generated by elements Sq^I(ι_{1+σ}), where I is admissible and e(I) < 1 + σ. So it is the polynomial algebra P(Sq^0(ι_{1+σ}), Sq^2Sq^1(ι_{1+σ}), Sq^4Sq^2Sq^1(ι_{1+σ}), · · · ). In short, it is P(x_ρ, x_{1+2ρ}, x_{1+4ρ}, x_{1+8ρ}, · · · ).
By now, we have calculated the cohomology of K(Z/2, n + σ) for n ≥ 0. To calculate the other cases, if we knew H^*(K_{nσ}) for n ≥ 2, we could use the Eilenberg-Moore spectral sequence [4, Chapter 5] and the Borel theorem for the path-space fibration
ΩK(Z/2, V) → P(K(Z/2, V)) → K(Z/2, V).
For example, for the path-space fibration K(Z/2, σ) → P(K(Z/2, 1 + σ)) → K(Z/2, 1 + σ), the E_∞-term of the Eilenberg-Moore spectral sequence is
E_∞ = E(x_σ, x_ρ, x_{2ρ}, x_{4ρ}, · · · )
with the relations
x_σ^2 = ax_σ + ux_{1+σ}, x_{1+σ}^2 = x_{2+2σ}, x_{2+2σ}^2 = x_{4+4σ}, ..., x_{2^iρ}^2 = x_{2^{i+1}ρ}, ... for i ≥ 0.
As a result, H^⋆(K(Z/2, σ); Z/2) is a quadratic extension of a polynomial algebra, as was already known.
References
[1] Mark Behrens and Dylan Wilson. A C2-equivariant analog of Mahowald's Thom spectrum theorem. Proceedings of the American Mathematical Society, 146, 07 2017.
[2] Jeffrey L. Caruso. Operations in equivariant Z/p-cohomology. Math. Proc. Cambridge Philos. Soc., 126(3):521–541, 1999.
[3] Pedro F. dos Santos. A note on the equivariant Dold-Thom theorem. J. Pure Appl. Algebra, 183(1-3):299–312, 2003.
[4] Michael A. Hill. Freeness and equivariant stable homotopy. Journal of Topology, 15(2):359–397.
[5] Po Hu and Igor Kriz. Real-oriented homotopy theory and an analogue of the Adams-Novikov spectral sequence. Topology, 40(2):317–399, 2001.
[6] William C. Kronholm. A freeness theorem for RO(Z/2)-graded cohomology. Topology Appl., 157(5):902–915, 2010.
[7] William C. Kronholm. The RO(G)-graded Serre spectral sequence. Homology Homotopy Appl., 12(1):75–92, 2010.
[8] G. Lewis, J. P. May, and J. McClure. Ordinary RO(G)-graded cohomology. Bull. Amer. Math. Soc. (N.S.), 4(2):208–212, 1981.
[9] L. Gaunce Lewis, Jr. Equivariant Eilenberg-Mac Lane spaces and the equivariant Seifert-van Kampen and suspension theorems. Topology Appl., 48(1):25–61, 1992.
[10] L. Gaunce Lewis, Jr. The equivariant Hurewicz map. Trans. Amer. Math. Soc., 329(2):433–472, 1992.
[11] P. Lima-Filho. On the equivariant homotopy of free abelian groups on G-spaces and G-spectra. Math. Z., 224(4):567–601, 1997.
[12] Mark Mahowald. The image of J in the EHP sequence. Ann. of Math. (2), 116(1):65–112, 1982.
[13] J. P. May. Equivariant homotopy and cohomology theory, volume 91 of CBMS Regional Conference Series in Mathematics. Published for the Conference Board of the Mathematical Sciences, Washington, DC, 1996. With contributions by M. Cole, G. Comezaña, S. Costenoble, A. D. Elmendorf, J. P. C. Greenlees, L. G. Lewis, Jr., R. J. Piacenza, G. Triantafillou, and S. Waner.
[14] John Milnor. The Steenrod algebra and its dual. Ann. of Math. (2), 67:150–171, 1958.
[15] Robert E. Mosher and Martin C. Tangora. Cohomology operations and applications in homotopy theory. Harper & Row, Publishers, New York-London, 1968.
[16] Nicolas Ricka. Subalgebras of the Z/2-equivariant Steenrod algebra. Homology Homotopy Appl., 17(1):281–305, 2015.
[17] Joël Riou. Opérations de Steenrod motiviques. https://arxiv.org/abs/1207.3121, 2012.
[18] Jean-Pierre Serre. Représentations linéaires des groupes finis. Hermann, Paris, 1967.
[19] Vladimir Voevodsky. Reduced power operations in motivic cohomology. Publ. Math. Inst. Hautes Études Sci., (98):1–57, 2003.
Istanbul Medeniyet University, Istanbul, TURKEY
Email address: ugur.yigit@medeniyet.edu.tr
Current address: Department of Mathematics, Istanbul Medeniyet University, H1-20, Istanbul, TURKEY 34700
diff --git a/QNE5T4oBgHgl3EQfZA_G/content/tmp_files/2301.05578v1.pdf.txt b/QNE5T4oBgHgl3EQfZA_G/content/tmp_files/2301.05578v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b0e0d350c4b65ee934921bb57a57849ef2409cfa
--- /dev/null
+++ b/QNE5T4oBgHgl3EQfZA_G/content/tmp_files/2301.05578v1.pdf.txt
@@ -0,0 +1,775 @@
Toward General Design Principles for Generative AI Applications
JUSTIN D. WEISZ, IBM Research AI, USA
MICHAEL MULLER, IBM Research AI, USA
JESSICA HE, IBM Research AI, USA
STEPHANIE HOUDE, IBM Research AI, USA
Fig. 1. Seven principles for the design of generative AI systems. Six of these principles are presented in overlapping circles, indicating their relationships to each other. One principle stands alone, the directive to design against potential harms that may be caused by a generative model's output, misuse, or other harmful effects. These principles are bounded in an environment of generative variability, in which the outputs of a generative AI application may vary in quantity, quality, character, or other characteristics.
Generative AI technologies are growing in power, utility, and use. As generative technologies are being incorporated into mainstream applications, there is a need for guidance on how to design those applications to foster productive and safe use. Based on recent research on human-AI co-creation within the HCI and AI communities, we present a set of seven principles for the design of generative AI applications. These principles are grounded in an environment of generative variability.
Six principles are focused on designing for characteristics of generative AI: multiple outcomes & imperfection; exploration & control; and mental models & explanations. In addition, we urge designers to design against potential harms that may be caused by a generative model's hazardous output, misuse, or potential for human displacement. We anticipate these principles to usefully inform design decisions made in the creation of novel human-AI applications, and we invite the community to apply, revise, and extend these principles to their own work.
CCS Concepts: • Human-centered computing → HCI design and evaluation methods; Interaction paradigms; HCI theory, concepts and models.
Additional Key Words and Phrases: generative AI, design principles, human-centered AI, foundation models
ACM Reference Format:
Justin D. Weisz, Michael Muller, Jessica He, and Stephanie Houde. 2023. Toward General Design Principles for Generative AI Applications. In . ACM, New York, NY, USA, 16 pages. https://doi.org/XXXXXXX.XXXXXXX
1 INTRODUCTION
As generative AI technologies continue to grow in power and utility, their use is becoming more mainstream. Generative models, including LLM-based foundation models [9], are being used for applications such as general Q&A (e.g. ChatGPT^1), software engineering assistance (e.g. Copilot^2), task automation (e.g. Adept^3), copywriting (e.g. Jasper.ai^4), and the creation of high-fidelity artwork (e.g. DALL-E 2 [87], Stable Diffusion [90], Midjourney^5). Given the explosion in popularity of these new kinds of generative applications, there is a need for guidance on how to design those applications to foster productive and safe use, in line with human-centered AI values [100].
Fostering productive use is a challenge, as revealed in a recent literature survey by Campero et al. [14]. They found that many human-AI collaborative systems failed to achieve positive synergy – the notion that a human-AI team is able to accomplish superior outcomes above either party working alone. In fact, some studies have found the opposite effect, that human-AI teams produced inferior results to either a human or AI working alone [12, 22, 49, 53].
Fostering safe use is a challenge because of the potential risks and harms that stem from generative AI, either because of how the model was trained (e.g. [113]) or because of how it is applied (e.g. [46, 79]).
In order to address these issues, we propose a set of design principles to aid the designers of generative AI systems. These principles are grounded in an environment of generative variability, indicating the two properties of generative AI systems inherently different from traditional discriminative^6 AI systems: generative, because the aim of generative AI applications is to produce artifacts as outputs, rather than determine decision boundaries as discriminative AI systems do, and variability, indicating the fact that, for a given input, a generative system may produce a variety of possible outputs, many of which may be valid; in the discriminative case, it is expected that the output of a model does not vary for a given input.
We note that our principles are meant to generally apply to generative AI applications. Other sets of design principles exist for specific kinds of generative AI applications, including Liu and Chilton [63]'s guidelines for engineering prompts for text-to-image models, and advice about one-shot prompts for generation of texts of different kinds [25, 40, 89]. There are also more general AI-related design guidelines [1, 5, 23, 48, 57].
Six of our principles are presented as "design for..." statements, indicating the characteristics that designers should keep in mind when making important design decisions. One is presented as a "design against..." statement, directing designers to design against potential harms that may arise from hazardous model outputs, misuse, potential for human displacement, or other harms we have not yet considered. The principles interact with each other in complex ways, schematically represented via overlapping circles in Figure 1. For example, the characteristic denoted in one principle (e.g. multiple outputs) can sometimes be leveraged as a strategy for addressing another principle (e.g. exploration). Principles are also connected by a user's aims, such as producing a singular artifact, seeking inspiration or creative ideas, or learning about a domain. They are also connected by design features or attributes of a generative AI application, such as the support for versioning, curation, or sandbox environments.
^1 http://chat.openai.com
^2 http://copilot.github.com
^3 http://adept.ai
^4 http://jasper.ai
^5 http://midjourney.com
^6 Our use of the term discriminative is to indicate that the task conducted by the AI algorithm is one of determining to which class or group a data instance belongs; classification and clustering algorithms are examples of discriminative AI. Although our use of the term discriminative may evoke imagery of human discrimination (e.g. via racial, religious, gender identity, genetic, or other lines), our use follows the scientific convention established in the machine learning community (see, e.g., https://en.wikipedia.org/wiki/Discriminative_model).
Our aim for these principles is threefold: (1) to provide the designers of generative AI applications with the language to discuss issues unique to generative AI; (2) to provide strategies and guidance to help designers make important design decisions around how end users will interact with a generative AI application; and (3) to sensitize designers to the idea that generative AI applications may cause a variety of harms (likely inadvertently, but possibly intentionally). We hope these principles provide the human-AI co-creation community with a reasoned way to think through the design of novel generative AI applications.
2 DESIGN PRINCIPLES FOR GENERATIVE AI APPLICATIONS
We developed seven design principles for generative AI applications based on recent research in the HCI and AI communities, specifically around human-AI co-creative processes. We conducted a literature review of research studies, guidelines, and analytic frameworks from these communities [1, 5, 23, 27, 39, 48, 57, 65, 68, 69, 81, 82, 96, 104], which included experiments in human-AI co-creation [2, 3, 55, 64, 106, 115, 116], examinations of representative generative applications [11, 50, 51, 64, 72, 87, 90, 92], and a review of publications in recent workshops [35, 77, 78, 114].
2.1 The Environment: Generative Variability
Generative AI technologies present unique challenges for designers of AI systems compared to discriminative AI systems. First, generative AI is generative in nature, which means their purpose is to produce artifacts as output, rather than decisions, labels, classifications, and/or decision boundaries. These artifacts may be comprised of different types of media, such as text, images, audio, animations or videos. Second, the outputs of a generative AI model are variable in nature. Whereas discriminative AI aims for deterministic outcomes, generative AI systems may not produce the same output for a given input each time. In fact, by design, they can produce multiple and divergent outputs for a given input, some or all of which may be satisfactory to the user. Thus, it may be difficult for users to achieve replicable results when working with a generative AI application.
Although the very nature of generative applications violates the common HCI principle that a system should respond consistently to a user's input (for critiques of this position, see [6, 10, 24, 28, 36, 79]), we take the position that this environment in which generative applications operate – generative variability – is a core strength. Generative applications enable users to explore or populate a "space" of possible outcomes to their query. Sometimes, this exploration is explicit, as in the case of systems that enable latent space manipulations of an artifact. Other times, exploration of a space occurs when a generative model produces multiple candidate outputs for a given input, such as multiple distinct images for a given prompt [87, 90] or multiple implementations of a source code program [115, 116]. Recent studies also show how users may improve their knowledge of a domain by working with a generative model and its variable outputs [92, 115].
This concept of generative variability is crucially important for designers of generative AI applications to communicate to users. Users who approach a generative AI system without understanding its probabilistic nature and its capacity to produce varied outputs will struggle to interact with it in productive ways. The design principles we outline in the following sections – designing for multiple outcomes & imperfection, for exploration & human control, and for mental models & explanations – are all rooted in the notion that generative AI systems are distinct and unique because they operate in an environment of generative variability.
2.2 Design for Multiple Outputs
Generative AI technologies such as encoder-decoder models [21, 107], generative adversarial networks [38], and transformer models [108] are probabilistic in nature and thus are capable of producing multiple, distinct outputs for a user's input. Designers therefore need to understand the extent to which these multiple outputs should be visible to users. Do users need the ability to annotate or curate? Do they need the ability to compare or contrast? How many outputs does a user need?
Understanding the user's task can help answer these questions. If the user's task is one of production, in which the ultimate goal is to produce a single, satisfying artifact, then designs that help the user filter and visualize differences may be preferable. For example, a software engineer's goal is often to implement a method that performs a specific behavior. Tools such as Copilot take a user's input, such as a method signature or documentation, and provide a singular output. Contrarily, if the user's task is one of exploration, then designs that help the user curate, annotate, and mutate may be preferable. For example, a software engineer may wish to explore a space of possible test cases for a code module. Or, an artist may wish to explore different compositions or styles to see a broad range of possibilities. Below we discuss a set of strategies for helping design for multiple outputs.
2.2.1 Versioning. Because of the randomness involved in the generative process, as well as other user-configurable parameters (e.g. a random seed, a temperature, or other types of user controls), it may be difficult for a user to produce exactly the same outcome twice. As a user interacts with a generative AI application and creates a set of outputs, they may find that they prefer earlier outputs to later ones. How can they recover or reset the state of the system to generate such earlier outputs? One strategy is to keep track of all of these outputs, as well as the parameters that produced them, by versioning them. Such versioning can happen manually (e.g. the user clicks a button to "save" their current working state) or automatically.
2.2.2 Curation. When a generative model is capable of producing multiple outputs, users may need tools to curate those outputs. Curation may include collecting, filtering, sorting, selecting, or organizing outputs (possibly from the versioned queue) into meaningful subsets or groups, or creating prioritized lists or hierarchies of outputs according to some subjective or objective criteria. For example, CogMol^7 generates novel molecular compounds, which can be sorted by various properties, such as their molecular weight, toxicity, or water solubility [18, 19]. In addition, the confidence of the model in each output it produced may be a useful way to sort or rank outputs, although in some cases, model confidence scores may not be indicative of the quality of the model's output [3]. (See the illustrative sketch below.)
^7 http://covid19-mol.mybluemix.net
2.2.3 Annotation. When a generative model has produced a large number of outputs, users may desire to add marks, decorators, or annotations to outputs of interest. These annotations may be applied to the output itself (e.g. "I like this") or it may be applied to a portion or subset of the output (e.g. flagging lines of source code that look problematic and need review).
2.2.4 Visualizing Differences. In some cases, a generative model may produce a diverse set of distinct outputs, such as images of cats that look strikingly different from each other. In other cases, a generative model may produce a set of outputs for which it is difficult to discern differences, such as a source code translation from one language to another. In this case, tools that aid users in visualizing the similarities and differences between multiple outputs can be useful. Depending on the users' goals, they may seek to find the invariant aspects across outcomes, such as identifying which parts of a source code translation were the same across multiple translations, indicating a confidence in its correctness. Or, users may prioritize the variant aspects for greater creativity and inspiration.
For example, Sentient Sketchbook [59] +is a video game co-creation system that displays a number of different metrics of the maps it generates, enabling users +to compare newly-generated maps with their current map to understand how they differ. +2.3 +Design for Imperfection +It is highly important for users to understand that the quality of a generative model’s outputs will vary. Users who +expect a generative AI application to produce exactly the artifact they desire will experience frustration when they +work with the system and find that it often produces imperfect artifacts. By “imperfect,” we mean that the artifact itself +may have imperfections, such as visual misrepresentations in an image, bugs or errors in source code, missing desired +elements (e.g. “an illustration of a bunny with a carrot” fails to include a carrot), violations of constraints specified in +the input prompt (e.g. “write a 10 word sentence” produces a much longer or shorter sentence), or even untruthful or +misleading answers (e.g. a summary of a scientific topic that includes non-existent references [91]). But, “imperfect” +can also mean “doesn’t satisfy the user’s desire,” such as when the user prompts a model and doesn’t get back any +satisfying outputs (e.g. the user didn’t like any of the illustrations of a bunny with a carrot). Below we discuss a set of +strategies for helping design for imperfection. +2.3.1 +Multiple Outputs. Our previous design principle is also a strategy for handling imperfect outputs. If a generative +model is allowed to produce multiple outputs, the likelihood that one of those outputs is satisfying to the user is +increased. One example of this effect is in how code translation models are evaluated, via a metric called pass@k [56, 93]. +The idea is that the model is allowed to produce k code translations for a given input, and if any of them pass a set of +unit tests, then the model is said to have produced a correct translation. In this way, generating multiple outputs serves +to mitigate the fact that the model’s most-likely output may be imperfect. However, it is left up to the user to review +the set of outputs and identify the one that is satisfactory; with multiple outputs that are very similar to each other, this +task may be difficult [116], implying the need for a way to easily visualize differences. +2.3.2 +Evaluation & Identification. Given that generative models may not produce perfect (or perfectly satisfying) +outputs, they may still be able to provide users with a signal about the quality of their outputs, or indicate parts that require +human review. As previously discussed, a model’s per-output confidence scores may be used (with care) to indicate +the quality of a model’s output. Or, domain-specific metrics (e.g. molecular toxicity, compiler errors) may be useful +indicators to evaluate whether an artifact achieved a desirable level of quality. Thus, evaluating the quality of generated +artifacts and identifying which portions of those artifacts may contain imperfections (and thus require human review, +discussed further in Weisz et al. [115]) can be an effective way of handling imperfection. +2.3.3 +Co-Creation. User experiences that allow for co-creation, in which both the user and the AI can edit a candidate +artifact, will be more effective than user experiences that assume or aim for the generative model to produce a perfect +output. 
Allowing users to edit a model’s outputs provides them with the opportunity to find and fix imperfections, and +ultimately achieve a satisfactory artifact. One example of this idea is Github Copilot [37], which is embedded in the +VSCode IDE. When Copilot produces an imperfect block of source code, developers are able to edit it right in +context without any friction. By contrast, tools like Midjourney or Stable Diffusion only produce a gallery of images to +choose from; editing those images requires the user to shift to a different environment (e.g. Photoshop). +5 + +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +Weisz et al. 2023 +2.3.4 +Sandbox / Playground Environment. A sandbox or playground environment ensures that when a user interacts +with a generated artifact, their interactions (such as edits, manipulations, or annotations) do not impact the larger +context or environment in which they are working. Returning to the example of Github Copilot, since it is situated inside +a developer’s IDE, code it produces is directly inserted into the working code file. Although this design choice enables +co-creation, it also poses a risk that imperfect code is injected into a production code base. A sandbox environment that +requires users to explicitly copy and paste code in order to commit it to the current working file may guard against the +accidental inclusion of imperfect outputs in a larger environment or product. +2.4 +Design for Human Control +Keeping humans in control of AI systems is a core tenet of human-centered AI [98–100]. Providing users with controls +in generative applications can improve their experience by increasing their efficiency, comprehension, and ownership +of generated outcomes [64]. But, in co-creative contexts, there are multiple ways to interpret what kinds of “control” +people need. We identify three kinds of controls applicable to generative AI applications. +2.4.1 +Generic Controls. One aspect of control relates to the exploration of a design space or range of possible outcomes +(as discussed in Section 2.5). Users need appropriate controls in order to drive their explorations, such as control over +the number of outputs produced from an input or the amount of variability present in the outputs. We refer to these +kinds of controls as generic controls, as they are applicable regardless of the particular generative technology or domain. As an +example, some generative projects may involve a “lifecycle” pattern in which users benefit from seeing a great diversity +of outputs in early stages of the process in order to search for ideas, inspirations, or directions. Later stages of the project +may focus on a smaller number of outputs (or a single output), requiring controls that specifically operate on that output. Many +generative algorithms include a user-controllable parameter called temperature. A low temperature setting produces +outcomes that are very similar to each other; conversely, a high temperature setting produces outcomes that are very +dissimilar to each other. In the “lifecycle” model, users may first set a high temperature for increased diversity, and then +reduce it when they wish to focus on a particular area of interest in the output space. This effect was observed in a +study of a music co-creation tool, in which novice users dragged temperature control sliders to the extreme ends to +explore the limits of what the AI could generate [64]. +2.4.2 +Technology-specific Controls. 
Other types of controls will depend on the particular generative technology being +employed. Encoder-decoder models, for example, often allow users to perform latent space manipulations of an artifact +in order to control semantically-meaningful attributes. For example, Liu and Chilton [62] demonstrate how semantic +sliders can be used to control attributes of 3D models of animals, such as the animal’s torso length, neck length, and +neck rotation. Transformer models use a temperature parameter to control the amount of randomness in the generation +process [110]. Natural language prompting, and the emerging discipline of prompt engineering [63], provide additional +ways to tune or tweak the outputs of large language models. We refer to these kinds of controls as technology-specific +controls, as the controls exposed to a user in a user interface will depend upon the particular generative AI technology +used in the application. +2.4.3 +Domain-specific Controls. Some types of user controls will be domain-specific, dependent on the type of artifact +being produced. For example, generative models that produce molecules as output might be controlled by having +the user specify desired properties such as molecular weight or water solubility; these types of constraints might be +propagated to the model itself (e.g. expressed as a constraint in the encoder phase), or they may simply act as a filter on +6 + +Toward General Design Principles for Generative AI Applications +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +the model’s output (e.g. hide anything from the user that doesn’t satisfy the constraints). In either case, the control +itself is dependent on the fact that the model is producing a specific kind of artifact, such as a molecule, and would not +logically make sense for other kinds of artifacts in other domains (e.g. how would you control the water solubility for a +text-to-image model?). Thus, we refer to these types of controls, independent of how they are implemented, as domain +specific. Other examples of domain-specific controls include the reading level of a text, the color palette or artistic style +of an image, or the run time or memory efficiency of source code. +2.5 +Design for Exploration +Because users are working in an environment of generative variability, they will need some way to “explore” or “navigate” +the space of potential outputs in order to identify one (or more) that satisfies their needs. Below we discuss a set of +strategies for helping design for exploration. +2.5.1 +Multiple Outputs. The ability for a generative model to produce multiple outputs (Section 2.2) is an enabler of +exploration. Returning to the bunny and carrot example, an artist may wish to explore different illustrative styles and +prompt (and re-prompt) the model for additional candidates of “a bunny with a carrot” in various kinds of styles or +configurations. Or, a developer can explore different ways to implement an algorithm by prompting (and re-prompting) +a model to produce implementations that possess different attributes (e.g. “implement this using recursion,” “implement +this using iteration,” or “implement this using memoization”). In this way, a user can get a sense of the different +possibilities the model is capable of producing. +2.5.2 +Control. Depending on the specific technical architecture used by the generative application, there may be +different ways for users to control it (Section 2.4). 
No matter the specific mechanisms of control, providing controls to a +user provides them with the ability to interactively work with the model to explore the space of possible outputs for +their given input. +2.5.3 +Sandbox / Playground Environment. A sandbox or playground environment can enable exploration by providing +a separate place in which new candidates can be explored, without interfering with a user’s main working environment. +For example, in a project using Copilot, Cheng et al. [17] suggest providing, “a sandbox mechanism to allow users to +play with the prompt in the context of their own project.” +2.5.4 +Visualization. One way to help users understand the space in which they are exploring is to explicitly visualize +it for them. Kreminski et al. [55] introduce the idea of expressive range coverage analysis (ERCA) in which a user is +shown a visualization of the “range” of possible generated artifacts across a variety of metrics. Then, as users interact +with the system and produce specific artifact instances, those instances are included in the visualization to show how +much of the “range” or “space” was explored by the user. +2.6 +Design for Mental Models +Users form mental models when they work with technological systems [31, 70, 95]. These models represent the user’s +understanding of how the system works and how to work with it effectively to produce the outcomes they desire. Due +to the environment of generative variability, generative AI applications will pose new challenges to users because these +applications may violate existing mental models of how computing systems behave (i.e. in a deterministic fashion). +Therefore, we recommend designing to support users in creating accurate mental models of generative AI applications +in the following ways. +7 + +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +Weisz et al. 2023 +2.6.1 +Orientation to Generative Variability. Users may need a general introduction to the concept of generative AI. +They should understand that the system may produce multiple outputs for their query (Section 2.2), that those outputs +may contain flaws or imperfections (Section 2.3), and that their effort may be required to collaborate with the system in +order to produce desired artifacts via various kinds of controls (Section 2.4). +2.6.2 +Role of the AI. Research in human-AI interaction suggests that users may view an AI application as filling a role +such as an assistant, coach, or teammate [96]. In a study of video game co-creation, Guzdial et al. [41] found participants +to ascribe roles of friend, collaborator, student, and manager to the AI system. Recent work by Ross et al. [92] examined +software engineers’ role orientations toward a programming assistant and found that people viewed the assistant with +a tool orientation, but interacted with it as if it were a social agent. Clearly establishing the role of a generative AI +application in a user’s workflow, as well as its level of autonomy (e.g. [32, 45, 84, 97]), will help users better understand +how to interact effectively with it. Designers can reason about the role of their application by answering questions such +as, is it a tool or partner? does it act proactively or does it just respond to the user? does it make changes to an artifact +directly or does it simply make recommendations for the user? +2.7 +Design for Explanations +Generative AI applications will be unfamiliar and possibly unusual to many users. 
They will want to know what the +application can (and cannot) do, how well it works, and how to work with it effectively. Some users may even wish to +understand the technical details of how the underlying generative AI algorithms work, although these details may not +be necessary to work effectively with the model (as discussed in [115]). +In recent years, the explainable AI (XAI) community has made tremendous progress in developing techniques for +explaining how AI systems work [7, 30, 57, 58, 101]. Much of the work in XAI has focused on discriminative algorithms: +how they generally make decisions (e.g. via interpretable models [74, Chapter 5] or feature importance [74, Section 8.5]), +and why they make a decision in a specific instance (e.g. via counterfactual explanations [74, Section 9.3]). +Recent work in human-centered XAI (HCXAI) has emphasized designing explanations that cater to human knowledge +and human needs [30]. This work grew out of a general shift toward human-centered data science [6], in which explanations are intended not for a technical user (a data scientist), but for an end user who might be impacted by a +machine learning model. +In the case of generative AI, recent work has begun to explore the need for explainability. Sun et al. [106] explored +explainability needs of software engineers working with a generative AI model for various types of use cases, such +as code translation and autocompletion. They identified a number of types of questions that software engineers had +about the generative AI, its capabilities, and its limitations, indicating that explainability is an important feature for +generative AI applications. They also identified several gaps in existing explainability frameworks stemming from the +generative nature of the AI system, indicating that existing XAI techniques may not be sufficient for generative AI +applications. Thus, we make the following recommendations for how to design for explanations. +2.7.1 +Calibrate Trust by Communicating Capabilities and Limitations. Because of the inherent imperfection of generative +AI outputs, users would be well-served if they understood the limitations of these systems [80, 85], allowing them +to calibrate their trust in terms of what the application can and cannot do [86]. When these kinds of imperfections +(Section 2.3) are not signaled, users of co-creative tools may mistakenly blame themselves for shortcomings of generated +artifacts [64], and users in Q & A use cases can be shown deceptive misconceptions and +harmful falsehoods as objective answers [60]. One way to communicate the capabilities of a generative AI application +8 + +Toward General Design Principles for Generative AI Applications +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +is to show examples of what it can do. For example, Midjourney provides a public discussion space to orient new users +and show them what other users have produced with the model. This space not only shows the outputs of the model +(e.g. images), but the textual prompts that produced the images. In this way, users can more quickly come to understand +how different prompts influence the application’s output. To communicate limitations, systems like ChatGPT contain +modal screens to inform users of the system’s limitations. +2.7.2 +Use Explanations to Create and Reinforce Accurate Mental Models. Weisz et al. [115] explored how a generative +model’s confidence could be surfaced in a user interface. 
Working with a transformer model on a code translation +task, they developed a prototype UI that highlighted tokens in the translation that the model was not confident in. +In their user study, they found that those highlights also served as explanations for how the model worked: users +came to understand that each source code token was chosen probabilistically, and that the model had considered +other alternatives. This design transformed an algorithmic weakness (imperfect output) into a resource for users to +understand how the algorithm worked, and ultimately, to control its output (by showing users where they might need +to make changes). +2.8 +Design Against Harms +The use of AI systems – including generative AI applications – may unfortunately lead to diverse forms of harms, +especially for people in vulnerable situations. Much work in AI ethics communities has identified how discriminative +AI systems may perpetuate harms such as the denial of personhood or identity [24, 52, 103]; the deprivation of liberty +or children [66, 94], and the erasure of persons, cultures, or nations through data silences [80]. We identify four types +of potential harms, some of which are unique to the generative domain, and others which represent existing risks of AI +applications that may manifest in new ways. +Our aim in this section is to sensitize designers to the potential risks and harms that generative AI systems may +pose. We do not prescribe solutions to address these risks, in part because it is an active area of research to understand +how these kinds of risks could be mitigated. Risk identification, assessment, and mitigation is a sociotechnical problem +involving computing resources, humans, and cultures. Even with our focus on the design of generative applications, an +analysis of harms that is limited to design concepts may blur into technosolutionism [61, 67, 88]. +We do posit that human-centered approaches to generative AI design are a useful first step, but must be part of a larger +strategy to understand who are the direct and indirect stakeholders of a generative application [34, 44], and to work +directly with those stakeholders to identify harms, understand what are their differing priorities and value tensions [73], +and negotiate issues of culture, policy, and (yes) technology to meet these diverse challenges (e.g., [26, 29, 42]). +2.8.1 +Hazardous Model Outputs. Generative AI applications may produce artifacts that cause harm. In an integrative +survey paper, Weidinger et al. [113] list six types of potential harms of large language models, three of which regard +the harms that may be caused by the model’s output: +• Discrimination, Exclusion, and Toxicity. Generative models may produce outputs that promote discrimina- +tion against certain groups, exclude certain groups from representation, or produce toxic content. Examples +include text-to-image models that fail to produce ethnically diverse outputs for a given input (e.g. a request for +images of doctors produces images of male, white doctors [20] or language models that produce inappropriate +language such as swear words, hate speech, or offensive content [1, 48]. +9 + +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +Weisz et al. 2023 +• Information Hazards. Generative models may inadvertently leak private or sensitive information from their +training data. For example, Carlini et al. 
[16] found that strategically prompting GPT-2 revealed an individual’s full +name, work address, phone number, email, and fax number. Additionally, larger models may be more vulnerable +to these types of attacks [15, 16]. +• Misinformation Harms. Generative models may produce inaccurate misinformation in response to a user’s +query. Lin et al. [60] found that GPT-3 can provide false answers that mimic human falsehoods and misconceptions, +such as “coughing can help stop a heart attack” or “[cold weather] tells us that global warming is a hoax”. Singhal +et al. [102] caution against the tendency of LLMs to hallucinate references, especially if consulted for medical +decisions. Albrecht et al. [4] claim that LLMs have few defenses against adversarial attacks while advising about +ethical questions. The Galactica model was found to hallucinate non-existent scientific references [43], and +Stack Overflow has banned responses sourced from ChatGPT due to their high rate of incorrect, yet plausible, +responses [109]. +In addition to those harms, a generative model’s outputs may be hazardous in other ways as well. +• Deceit, Impersonation, and Manipulation. Generative algorithms can be used to create false records or +“deep fakes” (e.g., [46, 71]), to impersonate others (e.g. [105]), or to distort information into politically-altered +content [118]. In addition, they may manipulate users who believe that they are chatting with another human +rather than with an algorithm, as in the case of an unreviewed ChatGPT “experiment” in which at least 4,000 +people seeking mental health support were connected to a chatbot rather than a human counselor [76]. +• Copyright, Licenses, and Intellectual Property. Generative models may have been trained on data protected +by regulations such as the GDPR, which prohibits the re-use of data beyond the purposes for which it was +collected. In addition, large language models have been referred to as “stochastic parrots” due to their ability +to reproduce data that was used during their training [8]. One consequence of this effect is that the model +may produce outputs that incorporate or remix materials that are subject to copyright or intellectual property +protections [33, 47, 83]. For example, the Codex model, which produces source code as output, may (re-)produce +source code that is copyrighted or subject to a software license, or that was openly shared under a creative +commons license that prohibits commercial re-use (e.g., in a pay-to-access LLM). Thus, the use of a model’s +outputs in a project may cause that project to violate copyright protections, or subject that project to a restrictive +license (e.g. GPL). As of this writing, there is a lawsuit against GitHub, Microsoft, and OpenAI on alleged copyright +violations in the training of Codex [13]. +2.8.2 +Misuse. Weidinger et al. [113] describe how generative AI applications may be misused in ways unanticipated +by the creators of those systems. Examples include making disinformation cheaper and more effective, facilitating +fraud and scams, assisting code generation for cyberattacks, or conducting illegitimate surveillance and censorship. +In addition to these misuses, Houde et al. [46] also identify business misuses of generative AI applications such as +facilitating insurance fraud and fabricating evidence of a crime. 
Although designers may not be able to prevent users +from intentionally misusing their generative AI applications, there may be preventative measures that make sense for +a given application domain. For example, output images may be watermarked to indicate they were generated by a +particular model, blocklists may be used to disallow undesirable words in a textual prompt, or multiple people may be +required to review or approve a model’s outputs before they can be used. +10 + +Toward General Design Principles for Generative AI Applications +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +2.8.3 +Human Displacement. One consequence of the large-scale deployment of generative AI technologies is that they +may come to replace, rather than augment, human workers. Such concerns have been raised in related areas, such as the +use of automated AI technologies in data science (Wang et al. [111, 112]). Weidinger et al. [113] specifically discuss the +potential economic harms and inequalities that may arise as a consequence of widespread adoption of generative AI. If a +generative model is capable of producing high-fidelity outputs that rival (or even surpass) what can be created by human +effort, are the humans necessary anymore? Contemporary fears of human displacement by generative technologies are +beginning to manifest in mainstream media, such as in the case of illustrators’ concerns that text-to-image models +such as Stable Diffusion and Midjourney will put them out of a job [117]. We urge designers to find ways to design +generative AI applications that enhance or augment human abilities, rather than applications that aim to replace human +workers. Copilot serves as one example of a tool that clearly enhances the abilities of a software engineer: it operates +on the low-level details of a source code implementation, freeing up software engineers to focus more of their attention +on higher-level architectural and system design issues. +3 +DISCUSSION +3.1 +Designing for User Aims +Users of generative AI applications may have varied aims or goals in using those systems. Some users may be in +pursuit of perfecting a singular artifact, such as a method implementation in a software program. Other users may be +in pursuit of inspiration or creative ideas, such as when exploring a visual design space. As a consequence of working +with a generative AI application, users may also enhance their own learning or understanding of the domain in which +they are operating, such as when a software engineer learns something new about a programming language from the +model’s output. Each of these aims can be supported by our design principles, which can also help designers determine the +appropriate strategy for addressing the challenges posed by each principle. +To support artifact production, designers ought to carefully consider how to manage a model’s multiple, imperfect +outputs. Interfaces ought to support users in curating, annotating, and mutating artifacts to help users refine a singular +artifact. The ability to version artifacts, or show a history of artifact edits, may also be useful to enable users to revisit +discarded options or undo undesirable modifications. For cases in which users seek to produce one “ideal” artifact that +satisfies some criteria, controls that enable them to co-create with the generative tool can help them achieve their goal +more efficiently, and explanations that signal or identify imperfections can tell them how close or far they are from the +mark. 
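To make the preceding recommendation concrete, the following minimal Python sketch shows one way an application might version generated artifacts together with the parameters that produced them, in the spirit of Sections 2.2.1–2.2.3. The class and field names are our own illustrative assumptions and are not drawn from any system cited in this paper.

```python
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional


@dataclass
class ArtifactVersion:
    """One generated output plus the settings that produced it."""
    artifact: Any                     # e.g. generated text, image bytes, or source code
    params: Dict[str, Any]            # e.g. {"prompt": "...", "seed": 42, "temperature": 0.8}
    created_at: datetime = field(default_factory=datetime.now)
    annotation: Optional[str] = None  # user notes, e.g. "I like this"


class VersionHistory:
    """Append-only history of generated artifacts, supporting simple curation and rollback."""

    def __init__(self) -> None:
        self._versions: List[ArtifactVersion] = []

    def record(self, artifact: Any, **params: Any) -> ArtifactVersion:
        # Save the output together with the parameters that produced it,
        # so that an earlier state can later be recovered or regenerated.
        version = ArtifactVersion(artifact=artifact, params=params)
        self._versions.append(version)
        return version

    def annotate(self, index: int, note: str) -> None:
        # Attach a user annotation to a previously recorded output.
        self._versions[index].annotation = note

    def restore(self, index: int) -> ArtifactVersion:
        # Return a stored version; the caller can reuse the old artifact
        # or re-run generation with the same recorded parameters.
        return self._versions[index]


# Illustrative usage with a hypothetical text-to-image generation step:
history = VersionHistory()
history.record("<image bytes>", prompt="an illustration of a bunny with a carrot", seed=7, temperature=0.9)
history.annotate(0, "good composition, wrong style")
earlier = history.restore(0)
```

Whether such a history is captured automatically or only when the user explicitly saves a working state is itself a design decision (Section 2.2.1), as is how the resulting set of versions is surfaced for curation and comparison.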
+To support inspiration and creativity, designers also ought to provide adequate controls that enable users to explore +a design space of possibilities [55, 75]. Visualizations that represent the design space can also be helpful as they can +show which parts the user has vs. has not explored, enabling them to explore the novel parts of that space. Tools that +help users manage, curate, and filter the different outputs created during their explorations can be extremely helpful, +such as a digital mood board for capturing inspiring model outputs. +Finally, to support learning how to effectively interact with a generative AI application, designers ought to help users +create accurate mental models [54] through explanations [7, 30, 57, 58, 101]. Explanations can help answer general +questions such as what a generative AI application is capable or not capable of generating, how the model’s controls +impact its output, and how the model was trained and the provenance of its training data. They can also answer +questions about a specific model output, such as how confident the model was in that output, which portions of that +11 + +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +Weisz et al. 2023 +output might need human review or revision, how to adjust or modify the input or prompt to adjust properties of the +output, or what other options or alternatives exist for that output. +3.2 +The Importance of Value-Sensitive Design in Mitigating Potential Harms +Designers need to be sensitive to the potential harms that may be caused by the rapid maturation and widespread +adoption of generative AI technologies. Although sociotechnical means for mitigating these harms have yet to be +developed, we recommend that designers use a Value Sensitive Design approach [34, 44] when reasoning about how to +design generative AI applications. By clearly identifying the different stakeholders and impacted parties of a generative +AI application, and explicitly enumerating their values, designers can make more reasoned judgments about how those +stakeholders might be impacted by hazardous model outputs, model misuse, and issues of human displacement. +4 +LIMITATIONS AND FUTURE WORK +Generative AI applications are still in their infancy, and new kinds of co-creative user experiences are emerging at a +rapid pace. Thus, we consider these principles to be in their infancy as well, and it is possible that other important +design principles, strategies, and/or user aims have been overlooked. In addition, although these principles can provide +helpful guidance to designers in making specific design decisions, they need to be validated in real-world settings to +ensure their clarity and utility. +5 +CONCLUSION +We present a set of seven design principles for generative AI applications. These principles are grounded in an +environment of generative variability, the key characteristics of which are that a generative AI application will generate +artifacts as outputs, and those outputs may be varied in nature (e.g. of varied quality or character). The principles +focus on designing for multiple outputs and the imperfection of those outputs, designing for exploration of a space or +range of possible outputs and maintaining human control over that exploration, and designing to establish accurate +mental models of the generative AI application via explanations. We also urge designers to design against the potential +harms that may be caused by hazardous model output (e.g. 
the production of inappropriate language or imagery, the +reinforcement of existing stereotypes, or a failure to inclusively represent different groups), by misuse of the model +(e.g. by creating disinformation or fabricating evidence), or by displacing human workers (e.g. by designing for the +replacement rather than the augmentation of human workers). We envision these principles to help designers make +reasoned choices as they create novel generative AI applications. +REFERENCES +[1] ACM. 2023. Words Matter: Alternatives for Charged Terminology in the Computing Profession. Retrieved 04-January-2023 from https://www.acm. +org/diversity-inclusion/words-matter +[2] Mayank Agarwal, Jorge J Barroso, Tathagata Chakraborti, Eli M Dow, Kshitij Fadnis, Borja Godoy, Madhavan Pallan, and Kartik Talamadupula. +2020. Project clai: Instrumenting the command line as a new environment for ai agents. arXiv preprint arXiv:2002.00762 (2020). +[3] Mayank Agarwal, Kartik Talamadupula, Stephanie Houde, Fernando Martinez, Michael Muller, John Richards, Steven Ross, and Justin D Weisz. +2020. Quality Estimation & Interpretability for Code Translation. In Proceedings of the NeurIPS 2020 Workshop on Computer-Assisted Programming +(NeurIPS 2020). +[4] Joshua Albrecht, Ellie Kitanidis, and Abraham J Fetterman. 2022. Despite" super-human" performance, current LLMs are unsuited for decisions +about ethics and safety. arXiv preprint arXiv:2212.06295 (2022). +[5] Saleema Amershi, Dan Weld, Mihaela Vorvoreanu, Adam Fourney, Besmira Nushi, Penny Collisson, Jina Suh, Shamsi Iqbal, Paul N Bennett, Kori +Inkpen, et al. 2019. Guidelines for human-AI interaction. In Proceedings of the 2019 chi conference on human factors in computing systems. 1–13. +[6] Cecilia Aragon, Shion Guha, Marina Kogan, Michael Muller, and Gina Neff. 2022. Human-Centered Data Science: An Introduction. MIT Press. +12 + +Toward General Design Principles for Generative AI Applications +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +[7] Vijay Arya, Rachel KE Bellamy, Pin-Yu Chen, Amit Dhurandhar, Michael Hind, Samuel C Hoffman, Stephanie Houde, Q Vera Liao, Ronny Luss, +Aleksandra Mojsilovic, et al. 2020. AI Explainability 360: An Extensible Toolkit for Understanding Data and Machine Learning Models. J. Mach. +Learn. Res. 21, 130 (2020), 1–6. +[8] Emily M Bender, Timnit Gebru, Angelina McMillan-Major, and Shmargaret Shmitchell. 2021. On the Dangers of Stochastic Parrots: Can Language +Models Be Too Big?. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency. 610–623. +[9] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine +Bosselut, Emma Brunskill, et al. 2021. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258 (2021). +[10] Danah Boyd and Kate Crawford. 2012. Critical questions for big data: Provocations for a cultural, technological, and scholarly phenomenon. +Information, communication & society 15, 5 (2012), 662–679. 
+[11] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish +Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, +Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, +Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. In Advances in Neural +Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 1877–1901. +https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf +[12] Zana Buçinca, Maja Barbara Malaya, and Krzysztof Z Gajos. 2021. To trust or to think: cognitive forcing functions can reduce overreliance on AI in +AI-assisted decision-making. Proceedings of the ACM on Human-Computer Interaction 5, CSCW1 (2021), 1–21. +[13] Matthew Butterick. 2022. GitHub Copilot Litigation. https://githubcopilotlitigation.com +[14] Andres Campero, Michelle Vaccaro, Jaeyoon Song, Haoran Wen, Abdullah Almaatouq, and Thomas W Malone. 2022. A Test for Evaluating +Performance in Human-Computer Systems. arXiv preprint arXiv:2206.12390 (2022). +[15] Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. 2022. Quantifying memorization across +neural language models. arXiv preprint arXiv:2202.07646 (2022). +[16] Nicholas Carlini, Florian Tramer, Eric Wallace, Matthew Jagielski, Ariel Herbert-Voss, Katherine Lee, Adam Roberts, Tom Brown, Dawn Song, Ulfar +Erlingsson, et al. 2021. Extracting training data from large language models. In 30th USENIX Security Symposium (USENIX Security 21). 2633–2650. +[17] Ruijia Cheng, Ruotong Wang, Thomas Zimmermann, and Denae Ford. 2022. "It would work for me too": How Online Communities Shape Software +Developers’ Trust in AI-Powered Code Generation Tools. arXiv preprint arXiv:2212.03491 (2022). +[18] Vijil Chenthamarakshan, Payel Das, Samuel C Hoffman, Hendrik Strobelt, Inkit Padhi, Kar Wai Lim, Benjamin Hoover, Matteo Manica, Jannis +Born, Teodoro Laino, et al. 2020. Cogmol: target-specific and selective drug design for COVID-19 using deep generative models. arXiv preprint +arXiv:2004.01215 (2020). +[19] Vijil Chenthamarakshan, Payel Das, Inkit Padhi, Hendrik Strobelt, Kar Wai Lim, Ben Hoover, Samuel C. Hoffman, and Aleksandra Mojsilovic. 2020. +Target-Specific and Selective Drug Design for COVID-19 Using Deep Generative Models. arXiv:2004.01215 [cs.LG] +[20] Jaemin Cho, Abhay Zala, and Mohit Bansal. 2022. Dall-eval: Probing the reasoning skills and social biases of text-to-image generative transformers. +arXiv preprint arXiv:2202.04053 (2022). +[21] Kyunghyun Cho, Bart Van Merriënboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning +phrase representations using RNN encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078 (2014). +[22] Elizabeth Clark, Anne Spencer Ross, Chenhao Tan, Yangfeng Ji, and Noah A Smith. 2018. Creative writing with a machine in the loop: Case studies +on slogans and stories. In 23rd International Conference on Intelligent User Interfaces. 329–340. +[23] Apple Computer. 2022. Human Interface Guidelines. https://developer.apple.com/design/human-interface-guidelines/guidelines/overview +[24] Sasha Costanza-Chock. 
2020. Design justice: Community-led practices to build the worlds we need. The MIT Press. +[25] Paul Denny, Viraj Kumar, and Nasser Giacaman. 2022. Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using +Natural Language. https://arxiv.org/abs/2210.15157 +[26] Norman K Denzin, Yvonna S Lincoln, Linda Tuhiwai Smith, et al. 2008. Handbook of critical and indigenous methodologies. Sage. +[27] Sebastian Deterding, Jonathan Hook, Rebecca Fiebrink, Marco Gillies, Jeremy Gow, Memo Akten, Gillian Smith, Antonios Liapis, and Kate Compton. +2017. Mixed-initiative creative interfaces. In Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems. +628–635. +[28] Catherine D’ignazio and Lauren F Klein. 2020. Data feminism. MIT press. +[29] Carl DiSalvo. 2022. Design as democratic inquiry: putting experimental civics into practice. MIT Press. +[30] Upol Ehsan, Philipp Wintersberger, Q Vera Liao, Elizabeth Anne Watkins, Carina Manger, Hal Daumé III, Andreas Riener, and Mark O Riedl. +2022. Human-Centered Explainable AI (HCXAI): beyond opening the black-box of AI. In CHI Conference on Human Factors in Computing Systems +Extended Abstracts. 1–7. +[31] Stephen M Fiore, Eduardo Salas, and Janis A Cannon-Bowers. 2001. Group dynamics and shared mental model development. How people evaluate +others in organizations 234 (2001). +[32] Paul M Fitts, MS Viteles, NL Barr, DR Brimhall, Glen Finch, Eric Gardner, WF Grether, WE Kellum, and SS Stevens. 1951. Human engineering for an +effective air-navigation and traffic-control system, and appendixes 1 thru 3. Technical Report. Ohio State Univ Research Foundation Columbus. +[33] Giorgio Franceschelli and Mirco Musolesi. 2022. Copyright in generative deep learning. Data & Policy 4 (2022). +[34] Batya Friedman and David G Hendry. 2019. Value sensitive design: Shaping technology with moral imagination. Mit Press. +13 + +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +Weisz et al. 2023 +[35] Werner Geyer, Lydia B Chilton, Justin D Weisz, and Mary Lou Maher. 2021. HAI-GEN 2021: 2nd Workshop on Human-AI Co-Creation with +Generative Models. In 26th International Conference on Intelligent User Interfaces-Companion. 15–17. +[36] Lisa Gitelman. 2013. Raw Data is an Oxymoron. MIT Press. +[37] Github. 2021. Copilot. Retrieved 03-August-2021 from https://copilot.github.com +[38] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. 2020. Generative +adversarial networks. Commun. ACM 63, 11 (2020), 139–144. +[39] Imke Grabe, Miguel González-Duque, Sebastian Risi, and Jichen Zhu. 2022. Towards a Framework for Human-AI Interaction Patterns in Co-Creative +GAN Applications. Joint Proceedings of the ACM IUI Workshops 2022, March 2022, Helsinki, Finland (2022). +[40] Cobus Greyling. 2022. Prompt engineering, text generation and large language models. https://cobusgreyling.medium.com/prompt-engineering- +text-generation-large-language-models-3d90c527c6d5 +[41] Matthew Guzdial, Nicholas Liao, Jonathan Chen, Shao-Yu Chen, Shukan Shah, Vishwa Shah, Joshua Reno, Gillian Smith, and Mark O Riedl. 2019. +Friend, collaborator, student, manager: How design of an ai-driven game level editor affects creators. In Proceedings of the 2019 CHI conference on +human factors in computing systems. 1–13. +[42] Gillian R Hayes. 2014. Knowing by doing: action research as an approach to HCI. In Ways of Knowing in HCI. Springer, 49–68. 
+[43] Will Douglas Heaven. 2022. Why Meta’s latest large language model survived only three days online. https://www.technologyreview.com/2022/ +11/18/1063487/meta-large-language-model-ai-only-survived-three-days-gpt-3-science/ +[44] David G Hendry, Batya Friedman, and Stephanie Ballard. 2021. Value sensitive design as a formative framework. Ethics and Information Technology +23, 1 (2021), 39–44. +[45] Eric Horvitz. 1999. Principles of Mixed-Initiative User Interfaces. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems +(Pittsburgh, Pennsylvania, USA) (CHI ’99). Association for Computing Machinery, New York, NY, USA, 159–166. https://doi.org/10.1145/302979. +303030 +[46] Stephanie Houde, Vera Liao, Jacquelyn Martino, Muller Muller, David Piorkowski, John Richards, Justin D Weisz, and Yunfeng Zhang. 2020. +Business (mis)Use Cases of Generative AI. In Joint Proceedings of the Workshops on Human-AI Co-Creation with Generative Models and User-Aware +Conversational Agents co-located with 25th International Conference on Intelligent User Interfaces (IUI 2020). +[47] Kalin Hristov. 2016. Artificial intelligence and the copyright dilemma. Idea 57 (2016), 431. +[48] IBM. 2023. Racial Equity in Design. Retrieved 04-January-2023 from https://www.ibm.com/design/racial-equity-in-design/ +[49] Maia Jacobs, Melanie F Pradier, Thomas H McCoy, Roy H Perlis, Finale Doshi-Velez, and Krzysztof Z Gajos. 2021. How machine-learning +recommendations influence clinician treatment selections: the example of antidepressant selection. Translational psychiatry 11, 1 (2021), 1–9. +[50] Tristan E Johnson, Youngmin Lee, Miyoung Lee, Debra L O’Connor, Mohammed K Khalil, and Xiaoxia Huang. 2007. Measuring sharedness of +team-related knowledge: Design and validation of a shared mental model instrument. Human Resource Development International 10, 4 (2007), +437–454. +[51] Benjamin Kaiser, Akos Csiszar, and Alexander Verl. 2018. Generative models for direct generation of cnc toolpaths. In 2018 25th International +Conference on Mechatronics and Machine Vision in Practice (M2VIP). IEEE, 1–6. +[52] Shalini Kantayya. 2020. Coded Bias. Retrieved 04-January-2023 from https://www.pbs.org/independentlens/documentaries/coded-bias/ +[53] Bennett Kleinberg and Bruno Verschuere. 2021. How humans impair automated deception detection performance. Acta Psychologica 213 (2021), +103250. +[54] Steven Kollmansberger. 2010. Helping students build a mental model of computation. In Proceedings of the fifteenth annual conference on Innovation +and technology in computer science education. 128–131. +[55] Max Kreminski, Isaac Karth, Michael Mateas, and Noah Wardrip-Fruin. 2022. Evaluating Mixed-Initiative Creative Interfaces via Expressive Range +Coverage Analysis.. In IUI Workshops. 34–45. +[56] Sumith Kulal, Panupong Pasupat, Kartik Chandra, Mina Lee, Oded Padon, Alex Aiken, and Percy S Liang. 2019. Spoc: Search-based pseudocode to +code. Advances in Neural Information Processing Systems 32 (2019). +[57] Q Vera Liao, Daniel Gruen, and Sarah Miller. 2020. Questioning the AI: informing design practices for explainable AI user experiences. In Proceedings +of the 2020 CHI Conference on Human Factors in Computing Systems. 1–15. +[58] Q Vera Liao, Moninder Singh, Yunfeng Zhang, and Rachel Bellamy. 2021. Introduction to explainable ai. In Extended Abstracts of the 2021 CHI +Conference on Human Factors in Computing Systems. 1–3. +[59] Antonios Liapis, Georgios N Yannakakis, Julian Togelius, et al. 2013. 
Sentient Sketchbook: Computer-aided game level authoring.. In FDG. 213–220. +[60] Stephanie Lin, Jacob Hilton, and Owain Evans. 2021. Truthfulqa: Measuring how models mimic human falsehoods. arXiv preprint arXiv:2109.07958 +(2021). +[61] Silvia Lindtner, Shaowen Bardzell, and Jeffrey Bardzell. 2016. Reconstituting the utopian vision of making: HCI after technosolutionism. In +Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems. 1390–1402. +[62] Vivian Liu and Lydia B Chilton. 2021. Neurosymbolic Generation of 3D Animal Shapes through Semantic Controls.. In IUI Workshops. +[63] Vivian Liu and Lydia B Chilton. 2022. Design Guidelines for Prompt Engineering Text-to-Image Generative Models. In CHI Conference on Human +Factors in Computing Systems. 1–23. +[64] Ryan Louie, Andy Coenen, Cheng Zhi Huang, Michael Terry, and Carrie J Cai. 2020. Novice-AI music co-creation via AI-steering tools for deep +generative models. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems. 1–13. +14 + +Toward General Design Principles for Generative AI Applications +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +[65] Todd Lubart. 2005. How can computers be partners in the creative process: classification and commentary on the special issue. International +Journal of Human-Computer Studies 63, 4-5 (2005), 365–369. +[66] Alexandra Lyn. 2020. Risky Business: Artificial Intelligence and Risk Assessments in Sentencing and Bail Procedures in the United States. Available +at SSRN 3831441 (2020). +[67] Michael A Madaio, Luke Stark, Jennifer Wortman Vaughan, and Hanna Wallach. 2020. Co-designing checklists to understand organizational +challenges and opportunities around fairness in AI. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems. 1–14. +[68] Mary Lou Maher. 2012. Computational and collective creativity: Who’s being creative?. In ICCC. Citeseer, 67–71. +[69] Mary Lou Maher, Brian Magerko, Dan Venura, Douglas Fisher, Rogelio Cardona-rivera, Nancy Fulda, Johannes Gooth, Minwoo Lee, David Wilson, +James Kaufman, et al. 2022. A Research Plan for Integrating Generative and Cognitive AI for Human Centered, Explainable Co-Creative AI. In +ACM CHI Conference on Human Factors in Computing Systems. +[70] John E Mathieu, Tonia S Heffner, Gerald F Goodwin, Eduardo Salas, and Janis A Cannon-Bowers. 2000. The influence of shared mental models on +team process and performance. Journal of applied psychology 85, 2 (2000), 273. +[71] Edvinas Meskys, Julija Kalpokiene, Paulius Jurcys, and Aidas Liaudanskas. 2020. Regulating deep fakes: legal and ethical considerations. Journal of +Intellectual Property Law & Practice 15, 1 (2020), 24–31. +[72] Cade Metz. 2022. Meet GPT-3. It Has Learned to Code (and Blog and Argue). (Published 2020). https://www.nytimes.com/2020/11/24/science/ +artificial-intelligence-ai-gpt3.html +[73] Jessica K Miller, Batya Friedman, Gavin Jancke, and Brian Gill. 2007. Value tensions in design: the value sensitive design, development, and +appropriation of a corporation’s groupware system. In Proceedings of the 2007 international ACM conference on Supporting group work. 281–290. +[74] Christoph Molnar. 2020. Interpretable machine learning. Lulu. com. +[75] Meredith Ringel Morris, Carrie J. Cai, Jess Holbrook, Chinmay Kulkarni, and Michael Terry. 2022. The Design Space of Generative Models. In +Proceedings of the NeurIPS 2022 Workshop on Human-Centered AI (NeurIPS 2022). +[76] Robert R. Morris. 2023. 
We provided mental health support to about 4,000 people — using GPT-3. Here’s what happened. Retrieved 07-Jan-2023 from +https://twitter.com/RobertRMorris/status/1611450197707464706 +[77] Michael Muller, Plamen Agelov, Hal Daume, Q Vera Liao, Nuria Oliver, David Piorkowski, et al. 2022. HCAI@NeurIPS 2022, Human Centered AI. +In Annual Conference on Neural Information Processing Systems. +[78] Michael Muller, Lydia B Chilton, Anna Kantosalo, Charles Patrick Martin, and Greg Walsh. 2022. GenAICHI: Generative AI and HCI. In CHI +Conference on Human Factors in Computing Systems Extended Abstracts. 1–7. +[79] Michael Muller, Steven I. Ross, Stephanie Houde, Mayank Agarwal, Fernando Martinez, John T. Richards, Kartik Talamadupula, and Justin D. +Weisz. 2022. Drinking Chai with Your (AI) Programming Partner: A Design Fiction about Generative AI for Software Engineering 107-122. In +Joint Proceedings of the IUI 2022 Workshops: APEx-UI, HAI-GEN, HEALTHI, HUMANIZE, TExSS, SOCIALIZE co-located with the ACM International +Conference on Intelligent User Interfaces (IUI 2022), Virtual Event, Helsinki, Finland, March 21-22, 2022 (CEUR Workshop Proceedings, Vol. 3124), Alison +Smith-Renner and Ofra Amir (Eds.). CEUR-WS.org, 107–122. +[80] Michael Muller and Angelika Stroymayer. 2022. Forgetting Practices in the Data Sciences. In Proceedings of the 2022 CHI Conference on Human +Factors in Computing Systems. In press. +[81] Michael Muller and Justin Weisz. 2022. Extending a Human-AI Collaboration Framework with Dynamism and Sociality. In 2022 Symposium on +Human-Computer Interaction for Work. 1–12. +[82] Michael Muller, Justin D. Weisz, and Werner Geyer. 2020. Mixed initiative generative AI interfaces: An analytic framework for generative AI +applications. ICCC 2020 Workshop, The Future of Co-Creative Systems. https://computationalcreativity.net/workshops/cocreative-iccc20/papers/ +Future_of_co-creative_systems_185.pdf +[83] Michael D Murray. 2022. Generative and AI Authored Artworks and Copyright Law. Available at SSRN (2022). +[84] Raja Parasuraman, Thomas B Sheridan, and Christopher D Wickens. 2000. A model for types and levels of human interaction with automation. +IEEE Transactions on systems, man, and cybernetics-Part A: Systems and Humans 30, 3 (2000), 286–297. +[85] Claudio Pinhanez. 2021. Expose Uncertainty, Instill Distrust, Avoid Explanations: Towards Ethical Guidelines for AI, in HCAI@NeurIPS 2021 +workshop. https://www.google.com/url?q=https%3A%2F%2Farxiv.org%2Fabs%2F2112.01281&sa=D +[86] Claudio Pinhanez. 2022. Breakdowns, Language Use, and Weird Errors: Past, Present, and Future of Research on Conversational Agents at BRL, in +IBM Research Cambridge Lab Guess Speaker Series. +[87] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. 2022. Hierarchical text-conditional image generation with clip latents. +arXiv preprint arXiv:2204.06125 (2022). +[88] Anais Resseguier and Rowena Rodrigues. 2021. Ethics as attention to context: recommendations for the ethics of artificial intelligence. Open +Research Europe 1, 27 (2021), 27. +[89] Laria Reynolds and Kyle McDonell. 2021. Prompt programming for large language models: Beyond the few-shot paradigm. In Extended Abstracts of +the 2021 CHI Conference on Human Factors in Computing Systems. 1–7. +[90] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-resolution image synthesis with latent diffusion +models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 
10684–10695. +[91] Janus Rose. 2022. Facebook Pulls Its New ‘AI For Science’ Because It’s Broken and Terrible. Vice (November 2022). Retrieved 06-Jan-2023 from +https://www.vice.com/en/article/3adyw9/facebook-pulls-its-new-ai-for-science-because-its-broken-and-terrible +15 + +HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia +Weisz et al. 2023 +[92] Steven I Ross, Fernando Martinez, Stephanie Houde, Michael Muller, and Justin D Weisz. 2023. The Programmer’s Assistant: Conversational +Interaction with a Large Language Model for Software Development. In 28th International Conference on Intelligent User Interfaces. +[93] Baptiste Roziere, Marie-Anne Lachaux, Lowik Chanussot, and Guillaume Lample. 2020. Unsupervised Translation of Programming Languages.. In +NeurIPS. +[94] Devansh Saxena, Karla Badillo-Urquiola, Pamela J Wisniewski, and Shion Guha. 2021. A framework of high-stakes algorithmic decision-making for +the public sector developed through a case study of child-welfare. Proceedings of the ACM on Human-Computer Interaction 5, CSCW2 (2021), 1–41. +[95] Matthias Scheutz, Scott A DeLoach, and Julie A Adams. 2017. A framework for developing and using shared mental models in human-agent teams. +Journal of Cognitive Engineering and Decision Making 11, 3 (2017), 203–224. +[96] Isabella Seeber, Eva Bittner, Robert O Briggs, Triparna De Vreede, Gert-Jan De Vreede, Aaron Elkins, Ronald Maier, Alexander B Merz, Sarah +Oeste-Reiß, Nils Randrup, et al. 2020. Machines as teammates: A research agenda on AI in team collaboration. Information & management 57, 2 +(2020), 103174. +[97] Thomas B Sheridan and William L Verplank. 1978. Human and computer control of undersea teleoperators. Technical Report. Massachusetts Inst of +Tech Cambridge Man-Machine Systems Lab. +[98] Ben Shneiderman. 2020. Human-centered artificial intelligence: Reliable, safe & trustworthy. International Journal of Human–Computer Interaction +36, 6 (2020), 495–504. +[99] Ben Shneiderman. 2021. Human-Centered AI. Issues in Science and Technology 37, 2 (2021), 56–61. +[100] Ben Shneiderman. 2022. Human-Centered AI. Oxford University Press. +[101] Auste Simkute, Aditi Surana, Ewa Luger, Michael Evans, and Rhianne Jones. 2022. XAI for learning: Narrowing down the digital divide between +“new” and “old” experts. In Adjunct Proceedings of the 2022 Nordic Human-Computer Interaction Conference. 1–6. +[102] Karan Singhal, Shekoofeh Azizi, Tao Tu, S Sara Mahdavi, Jason Wei, Hyung Won Chung, Nathan Scales, Ajay Tanwani, Heather Cole-Lewis, +Stephen Pfohl, et al. 2022. Large Language Models Encode Clinical Knowledge. arXiv preprint arXiv:2212.13138 (2022). +[103] Katta Spiel. 2021. ” Why are they all obsessed with Gender?”—(Non) binary Navigations through Technological Infrastructures. In Designing +Interactive Systems Conference 2021. 478–494. +[104] Angie Spoto and Natalia Oleynik. 2017. Library of Mixed-Initiative Creative Interfaces. Retrieved 19-Jun-2021 from http://mici.codingconduct.cc/ +[105] Catherine Stupp. 2019. Fraudsters Used AI to Mimic CEO’s Voice in Unusual Cybercrime Case. The Wall Street Journal (August 2019). Retrieved +06-Jan-2023 from https://www.wsj.com/articles/fraudsters-use-ai-to-mimic-ceos-voice-in-unusual-cybercrime-case-11567157402 +[106] Jiao Sun, Q Vera Liao, Michael Muller, Mayank Agarwal, Stephanie Houde, Kartik Talamadupula, and Justin D Weisz. 2022. Investigating +Explainability of Generative AI for Code through Scenario-based Design. 
+[107] Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. Advances in Neural Information Processing Systems 27 (2014).
+[108] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in Neural Information Processing Systems 30 (2017).
+[109] James Vincent. 2022. AI-generated answers temporarily banned on coding Q&A site Stack Overflow. Retrieved 06-Jan-2023 from https://www.theverge.com/2022/12/5/23493932/chatgpt-ai-generated-answers-temporarily-banned-stack-overflow-llms-dangers
+[110] Patrick von Platen. 2020. How to generate text: using different decoding methods for language generation with Transformers. Hugging Face Blog (March 2020). Retrieved 06-Jan-2023 from https://huggingface.co/blog/how-to-generate
+[111] Dakuo Wang, Justin D Weisz, Michael Muller, Parikshit Ram, Werner Geyer, Casey Dugan, Yla Tausczik, Horst Samulowitz, and Alexander Gray. 2019. Human-AI collaboration in data science: Exploring data scientists' perceptions of automated AI. Proceedings of the ACM on Human-Computer Interaction 3, CSCW (2019), 1–24.
+[112] Qiaosi Wang, Koustuv Saha, Eric Gregori, David Joyner, and Ashok Goel. 2021. Towards mutual theory of mind in human-AI interaction: How language reflects what students perceive about a virtual teaching assistant. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1–14.
+[113] Laura Weidinger, John Mellor, Maribeth Rauh, Conor Griffin, Jonathan Uesato, Po-Sen Huang, Myra Cheng, Mia Glaese, Borja Balle, Atoosa Kasirzadeh, et al. 2021. Ethical and social risks of harm from language models. arXiv preprint arXiv:2112.04359 (2021).
+[114] Justin D Weisz, Mary Lou Maher, Hendrik Strobelt, Lydia B Chilton, David Bau, and Werner Geyer. 2022. HAI-GEN 2022: 3rd Workshop on Human-AI Co-Creation with Generative Models. In 27th International Conference on Intelligent User Interfaces. 4–6.
+[115] Justin D Weisz, Michael Muller, Stephanie Houde, John Richards, Steven I Ross, Fernando Martinez, Mayank Agarwal, and Kartik Talamadupula. 2021. Perfection Not Required? Human-AI Partnerships in Code Translation. In 26th International Conference on Intelligent User Interfaces. 402–412.
+[116] Justin D Weisz, Michael Muller, Steven I Ross, Fernando Martinez, Stephanie Houde, Mayank Agarwal, Kartik Talamadupula, and John T Richards. 2022. Better Together? An Evaluation of AI-Supported Code Translation. In 27th International Conference on Intelligent User Interfaces. 369–391.
+[117] Alex Wilkins. 2022. Will AI text-to-image generators put illustrators out of a job? NewScientist (May 2022).
+[118] Shuo Yang, Kai Shu, Suhang Wang, Renjie Gu, Fan Wu, and Huan Liu. 2019. Unsupervised fake news detection on social media: A generative approach. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 33. 5644–5651.
diff --git a/QNE5T4oBgHgl3EQfZA_G/content/tmp_files/load_file.txt b/QNE5T4oBgHgl3EQfZA_G/content/tmp_files/load_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..59a406627b09ab1b72a72f321345cb8abaf07fa9
--- /dev/null
+++ b/QNE5T4oBgHgl3EQfZA_G/content/tmp_files/load_file.txt
@@ -0,0 +1,1075 @@
+filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf,len=1074

Toward General Design Principles for Generative AI Applications
JUSTIN D. WEISZ, IBM Research AI, USA
MICHAEL MULLER, IBM Research AI, USA
JESSICA HE, IBM Research AI, USA
STEPHANIE HOUDE, IBM Research AI, USA

Fig. 1. Seven principles for the design of generative AI systems. Six of these principles are presented in overlapping circles, indicating their relationships to each other. One principle stands alone, the directive to design against potential harms that may be caused by a generative model's output, misuse, or other harmful effects. These principles are bounded in an environment of generative variability, in which the outputs of a generative AI application may vary in quantity, quality, character, or other characteristics.

Generative AI technologies are growing in power, utility, and use. As generative technologies are being incorporated into mainstream applications, there is a need for guidance on how to design those applications to foster productive and safe use. Based on recent research on human-AI co-creation within the HCI and AI communities, we present a set of seven principles for the design of generative AI applications. These principles are grounded in an environment of generative variability.
Six principles are focused on designing for characteristics of generative AI: multiple outcomes & imperfection; exploration & control; and mental models & explanations. In addition, we urge designers to design against potential harms that may be caused by a generative model's hazardous output, misuse, or potential for human displacement. We anticipate these principles to usefully inform design decisions made in the creation of novel human-AI applications, and we invite the community to apply, revise, and extend these principles to their own work.

CCS Concepts: • Human-centered computing → HCI design and evaluation methods; Interaction paradigms; HCI theory, concepts and models.

Additional Key Words and Phrases: generative AI, design principles, human-centered AI, foundation models

ACM Reference Format: Justin D. Weisz, Michael Muller, Jessica He, and Stephanie Houde. 2023. Toward General Design Principles for Generative AI Applications. In . ACM, New York, NY, USA, 16 pages. https://doi.org/XXXXXXX.XXXXXXX

2023. Manuscript submitted to ACM
arXiv:2301.05578v1 [cs.HC] 13 Jan 2023

[Figure 1 diagram labels: Generative variability; Multiple outputs; Imperfection; Exploration; Control; Mental models; Explanation; Design against harms]

1 INTRODUCTION
As generative AI technologies continue to grow in power and utility, their use is becoming more mainstream. Generative models, including LLM-based foundation models [9], are being used for applications such as general Q&A (e.g. ChatGPT, http://chat.openai.com), software engineering assistance (e.g. Copilot, http://copilot.github.com), task automation (e.g. Adept, http://adept.ai), copywriting (e.g. Jasper.ai, http://jasper.ai), and the creation of high-fidelity artwork (e.g. DALL-E 2 [87], Stable Diffusion [90], Midjourney, http://midjourney.com). Given the explosion in popularity of these new kinds of generative applications, there is a need for guidance on how to design those applications to foster productive and safe use, in line with human-centered AI values [100]. Fostering productive use is a challenge, as revealed in a recent literature survey by Campero et al. [14].
They found that many human-AI collaborative systems failed to achieve positive synergy – the notion that a human-AI team is able to accomplish superior outcomes above either party working alone. In fact, some studies have found the opposite effect, that human-AI teams produced inferior results to either a human or AI working alone [12, 22, 49, 53]. Fostering safe use is a challenge because of the potential risks and harms that stem from generative AI, either because of how the model was trained (e.g. [113]) or because of how it is applied (e.g. [46, 79]).

In order to address these issues, we propose a set of design principles to aid the designers of generative AI systems. These principles are grounded in an environment of generative variability, indicating the two properties of generative AI systems inherently different from traditional discriminative AI systems: generative, because the aim of generative AI applications is to produce artifacts as outputs, rather than determine decision boundaries as discriminative AI systems do, and variability, indicating the fact that, for a given input, a generative system may produce a variety of possible outputs, many of which may be valid;
in the discriminative case, it is expected that the output of a model does not vary for a given input.

We note that our principles are meant to generally apply to generative AI applications. Other sets of design principles exist for specific kinds of generative AI applications, including Liu and Chilton [63]'s guidelines for engineering prompts for text-to-image models, and advice about one-shot prompts for generation of texts of different kinds [25, 40, 89]. There are also more general AI-related design guidelines [1, 5, 23, 48, 57].

Six of our principles are presented as "design for..." statements, indicating the characteristics that designers should keep in mind when making important design decisions. One is presented as a "design against..." statement, directing designers to design against potential harms that may arise from hazardous model outputs, misuse, potential for human displacement, or other harms we have not yet considered.

The principles interact with each other in complex ways, schematically represented via overlapping circles in Figure 1. For example, the characteristic denoted in one principle (e.g. multiple outputs) can sometimes be leveraged as a strategy for addressing another principle (e.g. exploration). Principles are also connected by a user's aims, such as producing a singular artifact, seeking inspiration or creative ideas, or learning about a domain.
They are also connected by design features or attributes of a generative AI application, such as the support for versioning, curation, or sandbox environments.

Footnote: Our use of the term discriminative is to indicate that the task conducted by the AI algorithm is one of determining to which class or group a data instance belongs; classification and clustering algorithms are examples of discriminative AI. Although our use of the term discriminative may evoke imagery of human discrimination (e.g. via racial, religious, gender identity, genetic, or other lines), our use follows the scientific convention established in the machine learning community (see, e.g., https://en.wikipedia.org/wiki/Discriminative_model).
Our aim for these principles is threefold: (1) to provide the designers of generative AI applications with the language to discuss issues unique to generative AI; (2) to provide strategies and guidance to help designers make important design decisions around how end users will interact with a generative AI application; and (3) to sensitize designers to the idea that generative AI applications may cause a variety of harms (likely inadvertently, but possibly intentionally). We hope these principles provide the human-AI co-creation community with a reasoned way to think through the design of novel generative AI applications.

2 DESIGN PRINCIPLES FOR GENERATIVE AI APPLICATIONS
We developed seven design principles for generative AI applications based on recent research in the HCI and AI communities, specifically around human-AI co-creative processes. We conducted a literature review of research studies, guidelines, and analytic frameworks from these communities [1, 5, 23, 27, 39, 48, 57, 65, 68, 69, 81, 82, 96, 104], which included experiments in human-AI co-creation [2, 3, 55, 64, 106, 115, 116], examinations of representative generative applications [11, 50, 51, 64, 72, 87, 90, 92], and a review of publications in recent workshops [35, 77, 78, 114].

2.1 The Environment: Generative Variability
Generative AI technologies present unique challenges for designers of AI systems compared to discriminative AI systems. First, generative AI is generative in nature, which means its purpose is to produce artifacts as output, rather than decisions, labels, classifications, and/or decision boundaries. These artifacts may be comprised of different types of media, such as text, images, audio, animations or videos. Second, the outputs of a generative AI model are variable in nature.
Whereas discriminative AI aims for deterministic outcomes, generative AI systems may not produce the same output for a given input each time. In fact, by design, they can produce multiple and divergent outputs for a given input, some or all of which may be satisfactory to the user. Thus, it may be difficult for users to achieve replicable results when working with a generative AI application.

Although the very nature of generative applications violates the common HCI principle that a system should respond consistently to a user's input (for critiques of this position, see [6, 10, 24, 28, 36, 79]), we take the position that this environment in which generative applications operate – generative variability – is a core strength. Generative applications enable users to explore or populate a "space" of possible outcomes to their query. Sometimes, this exploration is explicit, as in the case of systems that enable latent space manipulations of an artifact. Other times, exploration of a space occurs when a generative model produces multiple candidate outputs for a given input, such as multiple distinct images for a given prompt [87, 90] or multiple implementations of a source code program [115, 116]. Recent studies also show how users may improve their knowledge of a domain by working with a generative model and its variable outputs [92, 115].

This concept of generative variability is crucially important for designers of generative AI applications to communicate to users. Users who approach a generative AI system without understanding its probabilistic nature and its capacity to produce varied outputs will struggle to interact with it in productive ways. The design principles we outline in the following sections – designing for multiple outcomes & imperfection, for exploration & human control, and for mental models & explanations – are all rooted in the notion that generative AI systems are distinct and unique because they operate in an environment of generative variability.
2.2 Design for Multiple Outputs
Generative AI technologies such as encoder-decoder models [21, 107], generative adversarial networks [38], and transformer models [108] are probabilistic in nature and thus are capable of producing multiple, distinct outputs for a user's input. Designers therefore need to understand the extent to which these multiple outputs should be visible to users. Do users need the ability to annotate or curate? Do they need the ability to compare or contrast? How many outputs does a user need? Understanding the user's task can help answer these questions. If the user's task is one of production, in which the ultimate goal is to produce a single, satisfying artifact, then designs that help the user filter and visualize differences may be preferable. For example, a software engineer's goal is often to implement a method that performs a specific behavior. Tools such as Copilot take a user's input, such as a method signature or documentation, and provide a singular output. Contrarily, if the user's task is one of exploration, then designs that help the user curate, annotate, and mutate may be preferable. For example, a software engineer may wish to explore a space of possible test cases for a code module. Or, an artist may wish to explore different compositions or styles to see a broad range of possibilities. Below we discuss a set of strategies for helping design for multiple outputs.
2.2.1 Versioning. Because of the randomness involved in the generative process, as well as other user-configurable parameters (e.g. a random seed, a temperature, or other types of user controls), it may be difficult for a user to produce exactly the same outcome twice. As a user interacts with a generative AI application and creates a set of outputs, they may find that they prefer earlier outputs to later ones. How can they recover or reset the state of the system to generate such earlier outputs? One strategy is to keep track of all of these outputs, as well as the parameters that produced them, by versioning them. Such versioning can happen manually (e.g. the user clicks a button to "save" their current working state) or automatically.
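A minimal sketch of what such version tracking might look like in code (illustrative only; the class and field names are not from the original text):

from dataclasses import dataclass, field
from typing import Any, Dict, List

@dataclass
class OutputVersion:
    # A generated artifact plus the settings that produced it.
    artifact: Any
    params: Dict[str, Any]   # e.g. prompt, random seed, temperature
    note: str = ""           # optional user annotation, e.g. "I like this"

@dataclass
class VersionHistory:
    versions: List[OutputVersion] = field(default_factory=list)

    def save(self, artifact: Any, params: Dict[str, Any], note: str = "") -> int:
        # Record a new version (triggered manually by a "save" button, or automatically).
        self.versions.append(OutputVersion(artifact, params, note))
        return len(self.versions) - 1

    def restore(self, index: int) -> OutputVersion:
        # Recover an earlier output along with the parameters needed to regenerate it.
        return self.versions[index]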
2.2.2 Curation. When a generative model is capable of producing multiple outputs, users may need tools to curate those outputs. Curation may include collecting, filtering, sorting, selecting, or organizing outputs (possibly from the versioned queue) into meaningful subsets or groups, or creating prioritized lists or hierarchies of outputs according to some subjective or objective criteria. For example, CogMol (http://covid19-mol.mybluemix.net) generates novel molecular compounds, which can be sorted by various properties, such as their molecular weight, toxicity, or water solubility [18, 19]. In addition, the confidence of the model in each output it produced may be a useful way to sort or rank outputs, although in some cases, model confidence scores may not be indicative of the quality of the model's output [3].

2.2.3 Annotation. When a generative model has produced a large number of outputs, users may desire to add marks, decorators, or annotations to outputs of interest. These annotations may be applied to the output itself (e.g. "I like this") or to a portion or subset of the output (e.g. flagging lines of source code that look problematic and need review).

2.2.4 Visualizing Differences. In some cases, a generative model may produce a diverse set of distinct outputs, such as images of cats that look strikingly different from each other. In other cases, a generative model may produce a set of outputs for which it is difficult to discern differences, such as a source code translation from one language to another. In this case, tools that aid users in visualizing the similarities and differences between multiple outputs can be useful. Depending on the users' goals, they may seek to find the invariant aspects across outcomes, such as identifying which parts of a source code translation were the same across multiple translations, indicating a confidence in its correctness. Or, users may prioritize the variant aspects for greater creativity and inspiration. For example, Sentient Sketchbook [59] is a video game co-creation system that displays a number of different metrics of the maps it generates, enabling users to compare newly-generated maps with their current map to understand how they differ.
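A lightweight sketch of such support, assuming the outputs are text (for example, two candidate code translations); the candidate variables below are illustrative:

import difflib

candidate_a = ["def add(a, b):", "    return a + b"]
candidate_b = ["def add(x, y):", "    return x + y"]

# unified_diff highlights which lines differ between two candidate outputs,
# helping users spot the variant and invariant parts at a glance.
for line in difflib.unified_diff(candidate_a, candidate_b,
                                 fromfile="candidate_a", tofile="candidate_b",
                                 lineterm=""):
    print(line)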
2.3 Design for Imperfection
It is highly important for users to understand that the quality of a generative model's outputs will vary. Users who expect a generative AI application to produce exactly the artifact they desire will experience frustration when they work with the system and find that it often produces imperfect artifacts. By "imperfect," we mean that the artifact itself may have imperfections, such as visual misrepresentations in an image, bugs or errors in source code, missing desired elements (e.g. "an illustration of a bunny with a carrot" fails to include a carrot), violations of constraints specified in the input prompt (e.g. "write a 10 word sentence" produces a much longer or shorter sentence), or even untruthful or misleading answers (e.g. a summary of a scientific topic that includes non-existent references [91]).
But, "imperfect" can also mean "doesn't satisfy the user's desire," such as when the user prompts a model and doesn't get back any satisfying outputs (e.g. the user didn't like any of the illustrations of a bunny with a carrot). Below we discuss a set of strategies for helping design for imperfection.

2.3.1 Multiple Outputs. Our previous design principle is also a strategy for handling imperfect outputs. If a generative model is allowed to produce multiple outputs, the likelihood that one of those outputs is satisfying to the user is increased. One example of this effect is in how code translation models are evaluated, via a metric called pass@k [56, 93]. The idea is that the model is allowed to produce k code translations for a given input, and if any of them pass a set of unit tests, then the model is said to have produced a correct translation. In this way, generating multiple outputs serves to mitigate the fact that the model's most-likely output may be imperfect. However, it is left up to the user to review the set of outputs and identify the one that is satisfactory; with multiple outputs that are very similar to each other, this task may be difficult [116], implying the need for a way to easily visualize differences.
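A small illustrative sketch of how pass@k can be estimated when n samples are drawn and c of them pass the unit tests; this uses a commonly reported unbiased estimator, shown for clarity rather than as the exact procedure of [56, 93]:

from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    # Estimate pass@k from n generated samples, of which c pass the unit tests,
    # using 1 - C(n - c, k) / C(n, k).
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

# Example: 10 translations generated, 3 pass the tests; estimate pass@5.
print(round(pass_at_k(n=10, c=3, k=5), 3))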
2.3.2 Evaluation & Identification. Given that generative models may not produce perfect (or perfectly satisfying) outputs, they may still be able to provide users with a signal about the quality of their output, or indicate parts that require human review. As previously discussed, a model's per-output confidence scores may be used (with care) to indicate the quality of a model's output. Or, domain-specific metrics (e.g. molecular toxicity, compiler errors) may be useful indicators to evaluate whether an artifact achieved a desirable level of quality. Thus, evaluating the quality of generated artifacts and identifying which portions of those artifacts may contain imperfections (and thus require human review, discussed further in Weisz et al. [115]) can be an effective way for handling imperfection.
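As one concrete illustration (not drawn from the original text): for generated Python code, a cheap domain-specific signal is whether a candidate even parses, and candidates that do not can be flagged for human review.

def flag_for_review(candidates):
    # Return (candidate, issue) pairs; a syntax error is a simple, domain-specific
    # indicator that an output needs human attention before it is used.
    flagged = []
    for src in candidates:
        try:
            compile(src, "<generated>", "exec")
        except SyntaxError as err:
            flagged.append((src, f"syntax error at line {err.lineno}"))
    return flagged

print(flag_for_review(["print('ok')", "def broken(:\n    pass"]))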
2.3.3 Co-Creation. User experiences that allow for co-creation, in which both the user and the AI can edit a candidate artifact, will be more effective than user experiences that assume or aim for the generative model to produce a perfect output. Allowing users to edit a model's outputs provides them with the opportunity to find and fix imperfections, and ultimately achieve a satisfactory artifact. One example of this idea is GitHub Copilot [37], which is embedded in the VSCode IDE. In the case when Copilot produces an imperfect block of source code, developers are able to edit it right in context without any friction. By contrast, tools like Midjourney or Stable Diffusion only produce a gallery of images to choose from; editing those images requires the user to shift to a different environment (e.g. Photoshop).

2.3.4 Sandbox / Playground Environment. A sandbox or playground environment ensures that when a user interacts with a generated artifact, their interactions (such as edits, manipulations, or annotations) do not impact the larger context or environment in which they are working. Returning to the example of GitHub Copilot, since it is situated inside a developer's IDE, code it produces is directly inserted into the working code file. Although this design choice enables co-creation, it also poses a risk that imperfect code is injected into a production code base. A sandbox environment that requires users to explicitly copy and paste code in order to commit it to the current working file may guard against the accidental inclusion of imperfect outputs in a larger environment or product.

2.4 Design for Human Control
Keeping humans in control of AI systems is a core tenet of human-centered AI [98–100].
Providing users with controls in generative applications can improve their experience by increasing their efficiency, comprehension, and ownership of generated outcomes [64]. But, in co-creative contexts, there are multiple ways to interpret what kinds of "control" people need. We identify three kinds of controls applicable to generative AI applications.

2.4.1 Generic Controls. One aspect of control relates to the exploration of a design space or range of possible outcomes (as discussed in Section 2.5). Users need appropriate controls in order to drive their explorations, such as control over the number of outputs produced from an input or the amount of variability present in the outputs. We refer to these kinds of controls as generic controls, as they are applicable regardless of the particular generative technology or domain. As an example, some generative projects may involve a "lifecycle" pattern in which users benefit from seeing a great diversity of outputs in early stages of the process in order to search for ideas, inspirations, or directions. Later stages of the project may focus on a smaller number of outputs (or a single output), requiring controls that specifically operate on that output. Many generative algorithms include a user-controllable parameter called temperature. A low temperature setting produces outcomes that are very similar to each other; conversely, a high temperature setting produces outcomes that are very dissimilar to each other. In the "lifecycle" model, users may first set a high temperature for increased diversity, and then reduce it when they wish to focus on a particular area of interest in the output space. This effect was observed in a study of a music co-creation tool, in which novice users dragged temperature control sliders to the extreme ends to explore the limits of what the AI could generate [64].
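One way to surface these two generic controls in code, sketched here with the Hugging Face transformers generation API described in [110]; the model name "gpt2", the prompt, and the token limit are arbitrary examples:

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

def generate_candidates(prompt: str, n_outputs: int, temperature: float):
    # Higher temperature yields more dissimilar candidates; lower yields more similar ones.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        do_sample=True,
        temperature=temperature,
        num_return_sequences=n_outputs,
        max_new_tokens=40,
        pad_token_id=tokenizer.eos_token_id,
    )
    return [tokenizer.decode(o, skip_special_tokens=True) for o in outputs]

# Early, divergent exploration vs. later, focused refinement.
print(generate_candidates("A tagline for a gardening app:", n_outputs=5, temperature=1.2))
print(generate_candidates("A tagline for a gardening app:", n_outputs=2, temperature=0.3))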
In the "lifecycle" model, users may first set a high temperature for increased diversity, and then reduce it when they wish to focus on a particular area of interest in the output space. This effect was observed in a study of a music co-creation tool, in which novice users dragged temperature control sliders to the extreme ends to explore the limits of what the AI could generate [64].

2.4.2 Technology-specific Controls. Other types of controls will depend on the particular generative technology being employed. Encoder-decoder models, for example, often allow users to perform latent space manipulations of an artifact in order to control semantically-meaningful attributes. For example, Liu and Chilton [62] demonstrate how semantic sliders can be used to control attributes of 3D models of animals, such as the animal's torso length, neck length, and neck rotation. Transformer models use a temperature parameter to control the amount of randomness in the generation process [110]. Natural language prompting, and the emerging discipline of prompt engineering [63], provide additional ways to tune or tweak the outputs of large language models. We refer to these kinds of controls as technology-specific controls, as the controls exposed to a user in a user interface will depend upon the particular generative AI technology used in the application.
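As an illustration of how such a latent-space control might be exposed, the sketch below shifts a latent code along a precomputed semantic direction in proportion to a slider value; the latent dimensionality, the direction vector, and the decoder are hypothetical stand-ins, not the system described in [62].

```python
import numpy as np

def apply_semantic_slider(latent, direction, slider_value):
    """Move a latent code along a unit-norm semantic direction.

    slider_value is the user-facing control (e.g. in [-1, 1]);
    positive values increase the attribute, negative values decrease it.
    """
    unit_direction = direction / np.linalg.norm(direction)
    return latent + slider_value * unit_direction

# Hypothetical 8-dimensional latent code and a learned "neck length" direction.
rng = np.random.default_rng(seed=1)
z = rng.normal(size=8)
neck_length_direction = rng.normal(size=8)

z_edited = apply_semantic_slider(z, neck_length_direction, slider_value=0.8)
# A real application would now decode z_edited back into an artifact,
# e.g. artifact = decoder(z_edited), and re-render it for the user.
```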
2.4.3 Domain-specific Controls. Some types of user controls will be domain-specific, dependent on the type of artifact being produced. For example, generative models that produce molecules as output might be controlled by having the user specify desired properties such as molecular weight or water solubility; these types of constraints might be propagated to the model itself (e.g. expressed as a constraint in the encoder phase), or they may simply act as a filter on the model's output (e.g. hide anything from the user that doesn't satisfy the constraints). In either case, the control itself is dependent on the fact that the model is producing a specific kind of artifact, such as a molecule, and would not logically make sense for other kinds of artifacts in other domains (e.g. how would you control the water solubility for a text-to-image model?). Thus, we refer to these types of controls, independent of how they are implemented, as domain-specific. Other examples of domain-specific controls include the reading level of a text, the color palette or artistic style of an image, or the run time or memory efficiency of source code.
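The "filter on the model's output" option can be sketched as a simple post-generation screen; the candidate molecules, property values, and bounds below are purely illustrative.

```python
def filter_by_domain_constraints(candidates, max_molecular_weight, min_solubility):
    """Hide generated candidates that violate user-specified property bounds."""
    return [
        c for c in candidates
        if c["molecular_weight"] <= max_molecular_weight
        and c["water_solubility"] >= min_solubility
    ]

# Hypothetical outputs of a molecule generator, with precomputed properties
# (solubility given here as an arbitrary 0-1 score).
generated = [
    {"smiles": "CCO", "molecular_weight": 46.07, "water_solubility": 0.9},
    {"smiles": "c1ccccc1", "molecular_weight": 78.11, "water_solubility": 0.2},
    {"smiles": "CC(=O)Oc1ccccc1C(=O)O", "molecular_weight": 180.16, "water_solubility": 0.5},
]

shown_to_user = filter_by_domain_constraints(
    generated, max_molecular_weight=100.0, min_solubility=0.4
)
print([c["smiles"] for c in shown_to_user])  # only candidates that satisfy both bounds
```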
2.5 Design for Exploration

Because users are working in an environment of generative variability, they will need some way to "explore" or "navigate" the space of potential outputs in order to identify one (or more) that satisfies their needs. Below we discuss a set of strategies for helping design for exploration.

2.5.1 Multiple Outputs. The ability for a generative model to produce multiple outputs (Section 2.2) is an enabler of exploration. Returning to the bunny and carrot example, an artist may wish to explore different illustrative styles and prompt (and re-prompt) the model for additional candidates of "a bunny with a carrot" in various kinds of styles or configurations. Or, a developer can explore different ways to implement an algorithm by prompting (and re-prompting) a model to produce implementations that possess different attributes (e.g. "implement this using recursion," "implement this using iteration," or "implement this using memoization"). In this way, a user can get a sense of the different possibilities the model is capable of producing.

2.5.2 Control. Depending on the specific technical architecture used by the generative application, there may be different ways for users to control it (Section 2.4). No matter the specific mechanisms of control, providing controls to a user provides them with the ability to interactively work with the model to explore the space of possible outputs for their given input.
2.5.3 Sandbox / Playground Environment. A sandbox or playground environment can enable exploration by providing a separate place in which new candidates can be explored, without interfering with a user's main working environment. For example, in a project using Copilot, Cheng et al. [17] suggest providing "a sandbox mechanism to allow users to play with the prompt in the context of their own project."

2.5.4 Visualization. One way to help users understand the space in which they are exploring is to explicitly visualize it for them. Kreminski et al. [55] introduce the idea of expressive range coverage analysis (ERCA), in which a user is shown a visualization of the "range" of possible generated artifacts across a variety of metrics. Then, as users interact with the system and produce specific artifact instances, those instances are included in the visualization to show how much of the "range" or "space" was explored by the user.

2.6 Design for Mental Models

Users form mental models when they work with technological systems [31, 70, 95]. These models represent the user's understanding of how the system works and how to work with it effectively to produce the outcomes they desire. Due to the environment of generative variability, generative AI applications will pose new challenges to users because these applications may violate existing mental models of how computing systems behave (i.e. in a deterministic fashion).
Therefore, we recommend designing to support users in creating accurate mental models of generative AI applications in the following ways.

2.6.1 Orientation to Generative Variability. Users may need a general introduction to the concept of generative AI. They should understand that the system may produce multiple outputs for their query (Section 2.2), that those outputs may contain flaws or imperfections (Section 2.3), and that their effort may be required to collaborate with the system in order to produce desired artifacts via various kinds of controls (Section 2.4).

2.6.2 Role of the AI. Research in human-AI interaction suggests that users may view an AI application as filling a role such as an assistant, coach, or teammate [96]. In a study of video game co-creation, Guzdial et al. [41] found participants to ascribe roles of friend, collaborator, student, and manager to the AI system.
Recent work by Ross et al. [92] examined software engineers' role orientations toward a programming assistant and found that people viewed the assistant with a tool orientation, but interacted with it as if it were a social agent. Clearly establishing the role of a generative AI application in a user's workflow, as well as its level of autonomy (e.g. [32, 45, 84, 97]), will help users better understand how to interact effectively with it. Designers can reason about the role of their application by answering questions such as: is it a tool or a partner? does it act proactively or does it just respond to the user? does it make changes to an artifact directly or does it simply make recommendations for the user?

2.7 Design for Explanations

Generative AI applications will be unfamiliar and possibly unusual to many users. They will want to know what the application can (and cannot) do, how well it works, and how to work with it effectively. Some users may even wish to understand the technical details of how the underlying generative AI algorithms work, although these details may not be necessary to work effectively with the model (as discussed in [115]). In recent years, the explainable AI (XAI) community has made tremendous progress at developing techniques for explaining how AI systems work [7, 30, 57, 58, 101]. Much of the work in XAI has focused on discriminative algorithms: how they generally make decisions (e.g. via interpretable models [74, Chapter 5] or feature importance [74, Section 8.5]), and why they make a decision in a specific instance (e.g. via counterfactual explanations [74, Section 9.3]).
Recent work in human-centered XAI (HCXAI) has emphasized designing explanations that cater to human knowledge and human needs [30]. This work grew out of a general shift toward human-centered data science [6], in which the import of explanations is not for a technical user (data scientist), but for an end user who might be impacted by a machine learning model. In the case of generative AI, recent work has begun to explore the needs for explainability. Sun et al. [106] explored the explainability needs of software engineers working with a generative AI model for various types of use cases, such as code translation and autocompletion. They identified a number of types of questions that software engineers had about the generative AI, its capabilities, and its limitations, indicating that explainability is an important feature for generative AI applications. They also identified several gaps in existing explainability frameworks stemming from the generative nature of the AI system, indicating that existing XAI techniques may not be sufficient for generative AI applications. Thus, we make the following recommendations for how to design for explanations.

2.7.1 Calibrate Trust by Communicating Capabilities and Limitations.
Because of the inherent imperfection of generative AI outputs, users would be well served if they understood the limitations of these systems [80, 85], allowing them to calibrate their trust in terms of what the application can and cannot do [86]. When these kinds of imperfections (Section 2.3) are not signaled, users of co-creative tools may mistakenly blame themselves for shortcomings of generated artifacts [64], and users in Q&A use cases can be shown deceptive misconceptions and harmful falsehoods as objective answers [60]. One way to communicate the capabilities of a generative AI application is to show examples of what it can do. For example, Midjourney provides a public discussion space to orient new users and show them what other users have produced with the model. This space not only shows the outputs of the model (e.g. images), but also the textual prompts that produced those images. In this way, users can more quickly come to understand how different prompts influence the application's output. To communicate limitations, systems like ChatGPT contain modal screens to inform users of the system's limitations.

2.7.2 Use Explanations to Create and Reinforce Accurate Mental Models. Weisz et al. [115] explored how a generative model's confidence could be surfaced in a user interface.
Working with a transformer model on a code translation task, they developed a prototype UI that highlighted tokens in the translation that the model was not confident in. In their user study, they found that those highlights also served as explanations for how the model worked: users came to understand that each source code token was chosen probabilistically, and that the model had considered other alternatives. This design transformed an algorithmic weakness (imperfect output) into a resource for users to understand how the algorithm worked, and ultimately, to control its output (by showing users where they might need to make changes).
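The general mechanism can be sketched as follows; this is not the prototype from [115], and the tokens, probabilities, and threshold are hypothetical, but it shows how per-token probabilities could be turned into "needs review" highlights.

```python
def flag_low_confidence_tokens(tokens, token_probs, threshold=0.5):
    """Pair each generated token with a flag marking whether the model's
    probability for that token fell below the review threshold."""
    return [
        {"token": tok, "prob": p, "needs_review": p < threshold}
        for tok, p in zip(tokens, token_probs)
    ]

# Hypothetical translated source-code tokens and their per-token probabilities.
tokens = ["def", "area", "(", "radius", ")", ":", "return", "3.14", "*", "radius", "**", "2"]
probs = [0.99, 0.62, 0.98, 0.71, 0.97, 0.99, 0.95, 0.31, 0.93, 0.88, 0.44, 0.90]

for item in flag_low_confidence_tokens(tokens, probs):
    marker = "<<REVIEW>>" if item["needs_review"] else ""
    print(f"{item['token']:>8}  p={item['prob']:.2f}  {marker}")
```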
2.8 Design Against Harms

The use of AI systems, including generative AI applications, may unfortunately lead to diverse forms of harms, especially for people in vulnerable situations. Much work in AI ethics communities has identified how discriminative AI systems may perpetuate harms such as the denial of personhood or identity [24, 52, 103], the deprivation of liberty or children [66, 94], and the erasure of persons, cultures, or nations through data silences [80]. We identify four types of potential harms, some of which are unique to the generative domain, and others which represent existing risks of AI applications that may manifest in new ways. Our aim in this section is to sensitize designers to the potential risks and harms that generative AI systems may pose. We do not prescribe solutions to address these risks, in part because it is an active area of research to understand how these kinds of risks could be mitigated. Risk identification, assessment, and mitigation is a sociotechnical problem involving computing resources, humans, and cultures. Even with our focus on the design of generative applications, an analysis of harms that is limited to design concepts may blur into technosolutionism [61, 67, 88]. We do posit that human-centered approaches to generative AI design are a useful first step, but they must be part of a larger strategy to understand who the direct and indirect stakeholders of a generative application are [34, 44], and to work directly with those stakeholders to identify harms, understand their differing priorities and value tensions [73], and negotiate issues of culture, policy, and (yes) technology to meet these diverse challenges (e.g., [26, 29, 42]).

2.8.1 Hazardous Model Outputs. Generative AI applications may produce artifacts that cause harm. In an integrative survey paper, Weidinger et al. [113] list six types of potential harms of large language models, three of which regard the harms that may be caused by the model's output:

Discrimination, Exclusion, and Toxicity. Generative models may produce outputs that promote discrimination against certain groups, exclude certain groups from representation, or produce toxic content. Examples include text-to-image models that fail to produce ethnically diverse outputs for a given input (e.g. a request for images of doctors produces images of male, white doctors [20]) or language models that produce inappropriate language such as swear words, hate speech, or offensive content [1, 48].
Information Hazards. Generative models may inadvertently leak private or sensitive information from their training data. For example, Carlini et al. [16] found that strategically prompting GPT-2 revealed an individual's full name, work address, phone number, email, and fax number. Additionally, larger models may be more vulnerable to these types of attacks [15, 16].

Misinformation Harms. Generative models may produce inaccurate information in response to a user's query. Lin et al. [60] found that GPT-3 can provide false answers that mimic human falsehoods and misconceptions, such as "coughing can help stop a heart attack" or "[cold weather] tells us that global warming is a hoax". Singhal et al. [102] caution against the tendency of LLMs to hallucinate references, especially if consulted for medical decisions. Albrecht et al. [4] claim that LLMs have few defenses against adversarial attacks while advising about ethical questions. The Galactica model was found to hallucinate non-existent scientific references [43], and Stack Overflow has banned responses sourced from ChatGPT due to their high rate of incorrect, yet plausible, responses [109]. In addition to those harms, a generative model's outputs may be hazardous in other ways as well.
Deceit, Impersonation, and Manipulation. Generative algorithms can be used to create false records or "deep fakes" (e.g., [46, 71]), to impersonate others (e.g. [105]), or to distort information into politically-altered content [118]. In addition, they may manipulate users who believe that they are chatting with another human rather than with an algorithm, as in the case of an unreviewed ChatGPT "experiment" in which at least 4,000 people seeking mental health support were connected to a chatbot rather than a human counselor [76].

Copyright, Licenses, and Intellectual Property. Generative models may have been trained on data protected by regulations such as the GDPR, which prohibits the re-use of data beyond the purposes for which it was collected. In addition, large language models have been referred to as "stochastic parrots" due to their ability to reproduce data that was used during their training [8]. One consequence of this effect is that the model may produce outputs that incorporate or remix materials that are subject to copyright or intellectual property protections [33, 47, 83]. For example, the Codex model, which produces source code as output, may (re-)produce source code that is copyrighted or subject to a software license, or that was openly shared under a Creative Commons license that prohibits commercial re-use (e.g., in a pay-to-access LLM). Thus, the use of a model's outputs in a project may cause that project to violate copyright protections, or subject that project to a restrictive license (e.g. GPL).
As of this writing, there is a lawsuit against GitHub, Microsoft, and OpenAI on alleged copyright violations in the training of Codex [13].

2.8.2 Misuse. Weidinger et al. [113] describe how generative AI applications may be misused in ways unanticipated by the creators of those systems. Examples include making disinformation cheaper and more effective, facilitating fraud and scams, assisting code generation for cyberattacks, or conducting illegitimate surveillance and censorship. In addition to these misuses, Houde et al. [46] also identify business misuses of generative AI applications, such as facilitating insurance fraud and fabricating evidence of a crime. Although designers may not be able to prevent users from intentionally misusing their generative AI applications, there may be preventative measures that make sense for a given application domain. For example, output images may be watermarked to indicate they were generated by a particular model, blocklists may be used to disallow undesirable words in a textual prompt, or multiple people may be required to review or approve a model's outputs before they can be used.
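As one concrete (and deliberately simplistic) sketch of the blocklist idea, the snippet below rejects prompts containing disallowed terms before they reach the model; the word list and matching rule are placeholders for whatever policy a real application would adopt.

```python
import re

# Placeholder terms; a real deployment would maintain a curated, policy-driven list.
BLOCKLIST = {"counterfeit_passport", "fake_evidence", "credit_card_dump"}

def prompt_is_allowed(prompt):
    """Return False if any blocklisted term appears as a whole word in the prompt."""
    words = set(re.findall(r"[a-z0-9_]+", prompt.lower()))
    return words.isdisjoint(BLOCKLIST)

print(prompt_is_allowed("generate a watercolor landscape at sunset"))     # True
print(prompt_is_allowed("create a fake_evidence photo of the accident"))  # False
```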
2.8.3 Human Displacement. One consequence of the large-scale deployment of generative AI technologies is that they may come to replace, rather than augment, human workers. Such concerns have been raised in related areas, such as the use of automated AI technologies in data science by Wang et al. [111, 112]. Weidinger et al. [113] specifically discuss the potential economic harms and inequalities that may arise as a consequence of widespread adoption of generative AI. If a generative model is capable of producing high-fidelity outputs that rival (or even surpass) what can be created by human effort, are the humans necessary anymore? Contemporary fears of human displacement by generative technologies are beginning to manifest in mainstream media, such as in the case of illustrators' concerns that text-to-image models such as Stable Diffusion and Midjourney will put them out of a job [117]. We urge designers to find ways to design generative AI applications that enhance or augment human abilities, rather than applications that aim to replace human workers. Copilot serves as one example of a tool that clearly enhances the abilities of a software engineer: it operates on the low-level details of a source code implementation, freeing up software engineers to focus more of their attention on higher-level architectural and system design issues.

3 DISCUSSION

3.1 Designing for User Aims

Users of generative AI applications may have varied aims or goals in using those systems. Some users may be in pursuit of perfecting a singular artifact, such as a method implementation in a software program. Other users may be in pursuit of inspiration or creative ideas, such as when exploring a visual design space.
As a consequence of working with a generative AI application, users may also enhance their own learning or understanding of the domain in which they are operating, such as when a software engineer learns something new about a programming language from the model's output. Each of these aims can be supported by our design principles, which can also help designers determine the appropriate strategy for addressing the challenges posed by each principle.

To support artifact production, designers ought to carefully consider how to manage a model's multiple, imperfect outputs. Interfaces ought to support users in curating, annotating, and mutating artifacts to help users refine a singular artifact. The ability to version artifacts, or show a history of artifact edits, may also be useful to enable users to revisit discarded options or undo undesirable modifications. For cases in which users seek to produce one "ideal" artifact that satisfies some criteria, controls that enable them to co-create with the generative tool can help them achieve their goal more efficiently, and explanations that signal or identify imperfections can tell them how close or far they are from the mark.

To support inspiration and creativity, designers also ought to provide adequate controls that enable users to explore a design space of possibilities [55, 75]. Visualizations that represent the design space can also be helpful, as they can show which parts the user has vs. has not explored, enabling them to explore the novel parts of that space. Tools that help users manage, curate, and filter the different outputs created during their explorations can be extremely helpful, such as a digital mood board for capturing inspiring model outputs.

Finally, to support learning how to effectively interact with a generative AI application, designers ought to help users create accurate mental models [54] through explanations [7, 30, 57, 58, 101].
Explanations can help answer general questions, such as what a generative AI application is or is not capable of generating, how the model's controls impact its output, and how the model was trained and the provenance of its training data. They can also answer questions about a specific model output, such as how confident the model was in that output, which portions of that output might need human review or revision, how to adjust or modify the input or prompt to adjust properties of the output, or what other options or alternatives exist for that output.

3.2 The Importance of Value-Sensitive Design in Mitigating Potential Harms

Designers need to be sensitive to the potential harms that may be caused by the rapid maturation and widespread adoption of generative AI technologies. Although sociotechnical means for mitigating these harms have yet to be developed, we recommend that designers use a Value Sensitive Design approach [34, 44] when reasoning about how to design generative AI applications. By clearly identifying the different stakeholders and impacted parties of a generative AI application, and explicitly enumerating their values, designers can make more reasoned judgments about how those stakeholders might be impacted by hazardous model outputs, model misuse, and issues of human displacement.

4 LIMITATIONS AND FUTURE WORK

Generative AI applications are still in their infancy, and new kinds of co-creative user experiences are emerging at a rapid pace. Thus, we consider these principles to be in their infancy as well, and it is possible that other important design principles, strategies, and/or user aims have been overlooked. In addition, although these principles can provide helpful guidance to designers in making specific design decisions, they need to be validated in real-world settings to ensure their clarity and utility.

5 CONCLUSION

We present a set of seven design principles for generative AI applications.
These principles are grounded in an environment of generative variability, the key characteristics of which are that a generative AI application will generate artifacts as outputs, and those outputs may be varied in nature (e.g. of varied quality or character). The principles focus on designing for multiple outputs and the imperfection of those outputs, designing for exploration of a space or range of possible outputs and maintaining human control over that exploration, and designing to establish accurate mental models of the generative AI application via explanations. We also urge designers to design against the potential harms that may be caused by hazardous model output (e.g. the production of inappropriate language or imagery, the reinforcement of existing stereotypes, or a failure to inclusively represent different groups), by misuse of the model (e.g. by creating disinformation or fabricating evidence), or by displacing human workers (e.g. by designing for the replacement rather than the augmentation of human workers). We envision these principles to help designers make reasoned choices as they create novel generative AI applications.

REFERENCES

[1] ACM. 2023. Words Matter: Alternatives for Charged Terminology in the Computing Profession. Retrieved 04-January-2023 from https://www.acm.org/diversity-inclusion/words-matter
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='acm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' org/diversity-inclusion/words-matter [2] Mayank Agarwal, Jorge J Barroso, Tathagata Chakraborti, Eli M Dow, Kshitij Fadnis, Borja Godoy, Madhavan Pallan, and Kartik Talamadupula.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Project clai: Instrumenting the command line as a new environment for ai agents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' arXiv preprint arXiv:2002.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='00762 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [3] Mayank Agarwal, Kartik Talamadupula, Stephanie Houde, Fernando Martinez, Michael Muller, John Richards, Steven Ross, and Justin D Weisz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Quality Estimation & Interpretability for Code Translation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Proceedings of the NeurIPS 2020 Workshop on Computer-Assisted Programming (NeurIPS 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [4] Joshua Albrecht, Ellie Kitanidis, and Abraham J Fetterman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Despite" super-human" performance, current LLMs are unsuited for decisions about ethics and safety.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' arXiv preprint arXiv:2212.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='06295 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [5] Saleema Amershi, Dan Weld, Mihaela Vorvoreanu, Adam Fourney, Besmira Nushi, Penny Collisson, Jina Suh, Shamsi Iqbal, Paul N Bennett, Kori Inkpen, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Guidelines for human-AI interaction.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Proceedings of the 2019 chi conference on human factors in computing systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 1–13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [6] Cecilia Aragon, Shion Guha, Marina Kogan, Michael Muller, and Gina Neff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Human-Centered Data Science: An Introduction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' MIT Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 12 Toward General Design Principles for Generative AI Applications HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia [7] Vijay Arya, Rachel KE Bellamy, Pin-Yu Chen, Amit Dhurandhar, Michael Hind, Samuel C Hoffman, Stephanie Houde, Q Vera Liao, Ronny Luss, Aleksandra Mojsilovic, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' AI Explainability 360: An Extensible Toolkit for Understanding Data and Machine Learning Models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Mach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Learn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 21, 130 (2020), 1–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [8] Emily M Bender, Timnit Gebru, Angelina McMillan-Major, and Shmargaret Shmitchell.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='. In Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 610–623.' 
[9] Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. 2021. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258 (2021).
[10] Danah Boyd and Kate Crawford. 2012. Critical questions for big data: Provocations for a cultural, technological, and scholarly phenomenon. Information, Communication & Society 15, 5 (2012), 662–679.
[11] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 1877–1901. https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf
[12] Zana Buçinca, Maja Barbara Malaya, and Krzysztof Z Gajos. 2021. To trust or to think: cognitive forcing functions can reduce overreliance on AI in AI-assisted decision-making. Proceedings of the ACM on Human-Computer Interaction 5, CSCW1 (2021), 1–21.
[13] Matthew Butterick. 2022. GitHub Copilot Litigation. https://githubcopilotlitigation.com
[14] Andres Campero, Michelle Vaccaro, Jaeyoon Song, Haoran Wen, Abdullah Almaatouq, and Thomas W Malone. 2022. A Test for Evaluating Performance in Human-Computer Systems. arXiv preprint arXiv:2206.12390 (2022).
[15] Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. 2022. Quantifying memorization across neural language models. arXiv preprint arXiv:2202.07646 (2022).
[16] Nicholas Carlini, Florian Tramer, Eric Wallace, Matthew Jagielski, Ariel Herbert-Voss, Katherine Lee, Adam Roberts, Tom Brown, Dawn Song, Ulfar Erlingsson, et al. 2021. Extracting training data from large language models. In 30th USENIX Security Symposium (USENIX Security 21). 2633–2650.
[17] Ruijia Cheng, Ruotong Wang, Thomas Zimmermann, and Denae Ford. 2022. "It would work for me too": How Online Communities Shape Software Developers' Trust in AI-Powered Code Generation Tools. arXiv preprint arXiv:2212.03491 (2022).
[18] Vijil Chenthamarakshan, Payel Das, Samuel C Hoffman, Hendrik Strobelt, Inkit Padhi, Kar Wai Lim, Benjamin Hoover, Matteo Manica, Jannis Born, Teodoro Laino, et al. 2020. CogMol: target-specific and selective drug design for COVID-19 using deep generative models. arXiv preprint arXiv:2004.01215 (2020).
[19] Vijil Chenthamarakshan, Payel Das, Inkit Padhi, Hendrik Strobelt, Kar Wai Lim, Ben Hoover, Samuel C. Hoffman, and Aleksandra Mojsilovic. 2020. Target-Specific and Selective Drug Design for COVID-19 Using Deep Generative Models. arXiv:2004.01215 [cs.LG]
[20] Jaemin Cho, Abhay Zala, and Mohit Bansal. 2022. DALL-Eval: Probing the reasoning skills and social biases of text-to-image generative transformers. arXiv preprint arXiv:2202.04053 (2022).
[21] Kyunghyun Cho, Bart Van Merriënboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using RNN encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078 (2014).
[22] Elizabeth Clark, Anne Spencer Ross, Chenhao Tan, Yangfeng Ji, and Noah A Smith. 2018. Creative writing with a machine in the loop: Case studies on slogans and stories. In 23rd International Conference on Intelligent User Interfaces. 329–340.
[23] Apple Computer. 2022. Human Interface Guidelines. https://developer.apple.com/design/human-interface-guidelines/guidelines/overview
[24] Sasha Costanza-Chock. 2020. Design justice: Community-led practices to build the worlds we need. The MIT Press.
[25] Paul Denny, Viraj Kumar, and Nasser Giacaman. 2022. Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language. https://arxiv.org/abs/2210.15157
[26] Norman K Denzin, Yvonna S Lincoln, Linda Tuhiwai Smith, et al. 2008. Handbook of critical and indigenous methodologies. Sage.
[27] Sebastian Deterding, Jonathan Hook, Rebecca Fiebrink, Marco Gillies, Jeremy Gow, Memo Akten, Gillian Smith, Antonios Liapis, and Kate Compton. 2017. Mixed-initiative creative interfaces. In Proceedings of the 2017 CHI Conference Extended Abstracts on Human Factors in Computing Systems. 628–635.
[28] Catherine D'Ignazio and Lauren F Klein. 2020. Data feminism. MIT Press.
[29] Carl DiSalvo. 2022. Design as democratic inquiry: putting experimental civics into practice. MIT Press.
[30] Upol Ehsan, Philipp Wintersberger, Q Vera Liao, Elizabeth Anne Watkins, Carina Manger, Hal Daumé III, Andreas Riener, and Mark O Riedl. 2022. Human-Centered Explainable AI (HCXAI): beyond opening the black-box of AI. In CHI Conference on Human Factors in Computing Systems Extended Abstracts. 1–7.
[31] Stephen M Fiore, Eduardo Salas, and Janis A Cannon-Bowers. 2001. Group dynamics and shared mental model development. How people evaluate others in organizations 234 (2001).
[32] Paul M Fitts, MS Viteles, NL Barr, DR Brimhall, Glen Finch, Eric Gardner, WF Grether, WE Kellum, and SS Stevens. 1951. Human engineering for an effective air-navigation and traffic-control system, and appendixes 1 thru 3. Technical Report. Ohio State Univ Research Foundation Columbus.
[33] Giorgio Franceschelli and Mirco Musolesi. 2022. Copyright in generative deep learning. Data & Policy 4 (2022).
[34] Batya Friedman and David G Hendry. 2019. Value sensitive design: Shaping technology with moral imagination. MIT Press.
[35] Werner Geyer, Lydia B Chilton, Justin D Weisz, and Mary Lou Maher. 2021. HAI-GEN 2021: 2nd Workshop on Human-AI Co-Creation with Generative Models. In 26th International Conference on Intelligent User Interfaces-Companion. 15–17.
[36] Lisa Gitelman. 2013. Raw Data is an Oxymoron. MIT Press.
[37] GitHub. 2021. Copilot. Retrieved 03-August-2021 from https://copilot.github.com
[38] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. 2020. Generative adversarial networks. Commun. ACM 63, 11 (2020), 139–144.
[39] Imke Grabe, Miguel González-Duque, Sebastian Risi, and Jichen Zhu. 2022. Towards a Framework for Human-AI Interaction Patterns in Co-Creative GAN Applications. Joint Proceedings of the ACM IUI Workshops 2022, March 2022, Helsinki, Finland (2022).
[40] Cobus Greyling. 2022. Prompt engineering, text generation and large language models. https://cobusgreyling.medium.com/prompt-engineering-text-generation-large-language-models-3d90c527c6d5
[41] Matthew Guzdial, Nicholas Liao, Jonathan Chen, Shao-Yu Chen, Shukan Shah, Vishwa Shah, Joshua Reno, Gillian Smith, and Mark O Riedl. 2019. Friend, collaborator, student, manager: How design of an AI-driven game level editor affects creators. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems. 1–13.
[42] Gillian R Hayes. 2014. Knowing by doing: action research as an approach to HCI. In Ways of Knowing in HCI. Springer, 49–68.
[43] Will Douglas Heaven. 2022. Why Meta's latest large language model survived only three days online. https://www.technologyreview.com/2022/11/18/1063487/meta-large-language-model-ai-only-survived-three-days-gpt-3-science/
[44] David G Hendry, Batya Friedman, and Stephanie Ballard. 2021. Value sensitive design as a formative framework. Ethics and Information Technology 23, 1 (2021), 39–44.
[45] Eric Horvitz. 1999. Principles of Mixed-Initiative User Interfaces. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (Pittsburgh, Pennsylvania, USA) (CHI '99). Association for Computing Machinery, New York, NY, USA, 159–166. https://doi.org/10.1145/302979.303030
[46] Stephanie Houde, Vera Liao, Jacquelyn Martino, Michael Muller, David Piorkowski, John Richards, Justin D Weisz, and Yunfeng Zhang. 2020. Business (mis)Use Cases of Generative AI. In Joint Proceedings of the Workshops on Human-AI Co-Creation with Generative Models and User-Aware Conversational Agents co-located with 25th International Conference on Intelligent User Interfaces (IUI 2020).
[47] Kalin Hristov. 2016. Artificial intelligence and the copyright dilemma. Idea 57 (2016), 431.
[48] IBM. 2023. Racial Equity in Design. Retrieved 04-January-2023 from https://www.ibm.com/design/racial-equity-in-design/
[49] Maia Jacobs, Melanie F Pradier, Thomas H McCoy, Roy H Perlis, Finale Doshi-Velez, and Krzysztof Z Gajos. 2021. How machine-learning recommendations influence clinician treatment selections: the example of antidepressant selection. Translational Psychiatry 11, 1 (2021), 1–9.
[50] Tristan E Johnson, Youngmin Lee, Miyoung Lee, Debra L O'Connor, Mohammed K Khalil, and Xiaoxia Huang. 2007. Measuring sharedness of team-related knowledge: Design and validation of a shared mental model instrument. Human Resource Development International 10, 4 (2007), 437–454.
[51] Benjamin Kaiser, Akos Csiszar, and Alexander Verl. 2018. Generative models for direct generation of CNC toolpaths. In 2018 25th International Conference on Mechatronics and Machine Vision in Practice (M2VIP). IEEE, 1–6.
[52] Shalini Kantayya. 2020. Coded Bias. Retrieved 04-January-2023 from https://www.pbs.org/independentlens/documentaries/coded-bias/
[53] Bennett Kleinberg and Bruno Verschuere. 2021. How humans impair automated deception detection performance. Acta Psychologica 213 (2021), 103250.
[54] Steven Kollmansberger. 2010. Helping students build a mental model of computation. In Proceedings of the Fifteenth Annual Conference on Innovation and Technology in Computer Science Education. 128–131.
[55] Max Kreminski, Isaac Karth, Michael Mateas, and Noah Wardrip-Fruin. 2022. Evaluating Mixed-Initiative Creative Interfaces via Expressive Range Coverage Analysis. In IUI Workshops. 34–45.
[56] Sumith Kulal, Panupong Pasupat, Kartik Chandra, Mina Lee, Oded Padon, Alex Aiken, and Percy S Liang. 2019. SPoC: Search-based pseudocode to code. Advances in Neural Information Processing Systems 32 (2019).
[57] Q Vera Liao, Daniel Gruen, and Sarah Miller. 2020. Questioning the AI: informing design practices for explainable AI user experiences. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems. 1–15.
[58] Q Vera Liao, Moninder Singh, Yunfeng Zhang, and Rachel Bellamy. 2021. Introduction to explainable AI. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems. 1–3.
[59] Antonios Liapis, Georgios N Yannakakis, Julian Togelius, et al. 2013. Sentient Sketchbook: Computer-aided game level authoring. In FDG. 213–220.
[60] Stephanie Lin, Jacob Hilton, and Owain Evans. 2021. TruthfulQA: Measuring how models mimic human falsehoods. arXiv preprint arXiv:2109.07958 (2021).
[61] Silvia Lindtner, Shaowen Bardzell, and Jeffrey Bardzell. 2016. Reconstituting the utopian vision of making: HCI after technosolutionism. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems. 1390–1402.
[62] Vivian Liu and Lydia B Chilton. 2021. Neurosymbolic Generation of 3D Animal Shapes through Semantic Controls. In IUI Workshops.
[63] Vivian Liu and Lydia B Chilton. 2022. Design Guidelines for Prompt Engineering Text-to-Image Generative Models. In CHI Conference on Human Factors in Computing Systems. 1–23.
[64] Ryan Louie, Andy Coenen, Cheng Zhi Huang, Michael Terry, and Carrie J Cai. 2020. Novice-AI music co-creation via AI-steering tools for deep generative models. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems. 1–13.
[65] Todd Lubart. 2005. How can computers be partners in the creative process: classification and commentary on the special issue. International Journal of Human-Computer Studies 63, 4-5 (2005), 365–369.
[66] Alexandra Lyn. 2020. Risky Business: Artificial Intelligence and Risk Assessments in Sentencing and Bail Procedures in the United States. Available at SSRN 3831441 (2020).
[67] Michael A Madaio, Luke Stark, Jennifer Wortman Vaughan, and Hanna Wallach. 2020. Co-designing checklists to understand organizational challenges and opportunities around fairness in AI. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems. 1–14.
[68] Mary Lou Maher. 2012. Computational and collective creativity: Who's being creative? In ICCC. Citeseer, 67–71.
[69] Mary Lou Maher, Brian Magerko, Dan Ventura, Douglas Fisher, Rogelio Cardona-Rivera, Nancy Fulda, Johannes Gooth, Minwoo Lee, David Wilson, James Kaufman, et al. 2022. A Research Plan for Integrating Generative and Cognitive AI for Human Centered, Explainable Co-Creative AI. In ACM CHI Conference on Human Factors in Computing Systems.
[70] John E Mathieu, Tonia S Heffner, Gerald F Goodwin, Eduardo Salas, and Janis A Cannon-Bowers. 2000. The influence of shared mental models on team process and performance. Journal of Applied Psychology 85, 2 (2000), 273.
[71] Edvinas Meskys, Julija Kalpokiene, Paulius Jurcys, and Aidas Liaudanskas. 2020. Regulating deep fakes: legal and ethical considerations. Journal of Intellectual Property Law & Practice 15, 1 (2020), 24–31.
[72] Cade Metz. 2022. Meet GPT-3. It Has Learned to Code (and Blog and Argue). (Published 2020). https://www.nytimes.com/2020/11/24/science/artificial-intelligence-ai-gpt3.html
[73] Jessica K Miller, Batya Friedman, Gavin Jancke, and Brian Gill. 2007. Value tensions in design: the value sensitive design, development, and appropriation of a corporation's groupware system. In Proceedings of the 2007 International ACM Conference on Supporting Group Work. 281–290.
[74] Christoph Molnar. 2020. Interpretable machine learning. Lulu.com.
[75] Meredith Ringel Morris, Carrie J. Cai, Jess Holbrook, Chinmay Kulkarni, and Michael Terry. 2022. The Design Space of Generative Models. In Proceedings of the NeurIPS 2022 Workshop on Human-Centered AI (NeurIPS 2022).
[76] Robert R. Morris.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2023.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' We provided mental health support to about 4,000 people — using GPT-3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Here’s what happened.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Retrieved 07-Jan-2023 from https://twitter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='com/RobertRMorris/status/1611450197707464706 [77] Michael Muller, Plamen Agelov, Hal Daume, Q Vera Liao, Nuria Oliver, David Piorkowski, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' HCAI@NeurIPS 2022, Human Centered AI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Annual Conference on Neural Information Processing Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [78] Michael Muller, Lydia B Chilton, Anna Kantosalo, Charles Patrick Martin, and Greg Walsh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' GenAICHI: Generative AI and HCI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In CHI Conference on Human Factors in Computing Systems Extended Abstracts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 1–7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [79] Michael Muller, Steven I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Ross, Stephanie Houde, Mayank Agarwal, Fernando Martinez, John T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Richards, Kartik Talamadupula, and Justin D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Weisz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Drinking Chai with Your (AI) Programming Partner: A Design Fiction about Generative AI for Software Engineering 107-122.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Joint Proceedings of the IUI 2022 Workshops: APEx-UI, HAI-GEN, HEALTHI, HUMANIZE, TExSS, SOCIALIZE co-located with the ACM International Conference on Intelligent User Interfaces (IUI 2022), Virtual Event, Helsinki, Finland, March 21-22, 2022 (CEUR Workshop Proceedings, Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 3124), Alison Smith-Renner and Ofra Amir (Eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=').' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' CEUR-WS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='org, 107–122.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [80] Michael Muller and Angelika Stroymayer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Forgetting Practices in the Data Sciences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [81] Michael Muller and Justin Weisz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Extending a Human-AI Collaboration Framework with Dynamism and Sociality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In 2022 Symposium on Human-Computer Interaction for Work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 1–12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [82] Michael Muller, Justin D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Weisz, and Werner Geyer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Mixed initiative generative AI interfaces: An analytic framework for generative AI applications.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' ICCC 2020 Workshop, The Future of Co-Creative Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' https://computationalcreativity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='net/workshops/cocreative-iccc20/papers/ Future_of_co-creative_systems_185.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='pdf [83] Michael D Murray.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Generative and AI Authored Artworks and Copyright Law.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Available at SSRN (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [84] Raja Parasuraman, Thomas B Sheridan, and Christopher D Wickens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' A model for types and levels of human interaction with automation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' IEEE Transactions on systems, man, and cybernetics-Part A: Systems and Humans 30, 3 (2000), 286–297.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [85] Claudio Pinhanez.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Expose Uncertainty, Instill Distrust, Avoid Explanations: Towards Ethical Guidelines for AI, in HCAI@NeurIPS 2021 workshop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='google.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='com/url?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='q=https%3A%2F%2Farxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='org%2Fabs%2F2112.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='01281&sa=D [86] Claudio Pinhanez.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Breakdowns, Language Use, and Weird Errors: Past, Present, and Future of Research on Conversational Agents at BRL, in IBM Research Cambridge Lab Guess Speaker Series.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [87] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Hierarchical text-conditional image generation with clip latents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' arXiv preprint arXiv:2204.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='06125 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [88] Anais Resseguier and Rowena Rodrigues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Ethics as attention to context: recommendations for the ethics of artificial intelligence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Open Research Europe 1, 27 (2021), 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [89] Laria Reynolds and Kyle McDonell.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Prompt programming for large language models: Beyond the few-shot paradigm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 1–7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [90] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' High-resolution image synthesis with latent diffusion models.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 10684–10695.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [91] Janus Rose.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Facebook Pulls Its New ‘AI For Science’ Because It’s Broken and Terrible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Vice (November 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Retrieved 06-Jan-2023 from https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='vice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='com/en/article/3adyw9/facebook-pulls-its-new-ai-for-science-because-its-broken-and-terrible 15 HAIGEN ’23 Workshop at IUI ’23, March 27-31, 2023, Sydney, NSW, Australia Weisz et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2023 [92] Steven I Ross, Fernando Martinez, Stephanie Houde, Michael Muller, and Justin D Weisz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2023.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' The Programmer’s Assistant: Conversational Interaction with a Large Language Model for Software Development.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In 28th International Conference on Intelligent User Interfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [93] Baptiste Roziere, Marie-Anne Lachaux, Lowik Chanussot, and Guillaume Lample.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Unsupervised Translation of Programming Languages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='. In NeurIPS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [94] Devansh Saxena, Karla Badillo-Urquiola, Pamela J Wisniewski, and Shion Guha.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' A framework of high-stakes algorithmic decision-making for the public sector developed through a case study of child-welfare.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Proceedings of the ACM on Human-Computer Interaction 5, CSCW2 (2021), 1–41.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [95] Matthias Scheutz, Scott A DeLoach, and Julie A Adams.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' A framework for developing and using shared mental models in human-agent teams.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Journal of Cognitive Engineering and Decision Making 11, 3 (2017), 203–224.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [96] Isabella Seeber, Eva Bittner, Robert O Briggs, Triparna De Vreede, Gert-Jan De Vreede, Aaron Elkins, Ronald Maier, Alexander B Merz, Sarah Oeste-Reiß, Nils Randrup, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Machines as teammates: A research agenda on AI in team collaboration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Information & management 57, 2 (2020), 103174.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [97] Thomas B Sheridan and William L Verplank.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 1978.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Human and computer control of undersea teleoperators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Technical Report.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Massachusetts Inst of Tech Cambridge Man-Machine Systems Lab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [98] Ben Shneiderman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Human-centered artificial intelligence: Reliable, safe & trustworthy.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' International Journal of Human–Computer Interaction 36, 6 (2020), 495–504.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [99] Ben Shneiderman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Human-Centered AI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Issues in Science and Technology 37, 2 (2021), 56–61.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [100] Ben Shneiderman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Human-Centered AI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Oxford University Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [101] Auste Simkute, Aditi Surana, Ewa Luger, Michael Evans, and Rhianne Jones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' XAI for learning: Narrowing down the digital divide between “new” and “old” experts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Adjunct Proceedings of the 2022 Nordic Human-Computer Interaction Conference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 1–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [102] Karan Singhal, Shekoofeh Azizi, Tao Tu, S Sara Mahdavi, Jason Wei, Hyung Won Chung, Nathan Scales, Ajay Tanwani, Heather Cole-Lewis, Stephen Pfohl, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Large Language Models Encode Clinical Knowledge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' arXiv preprint arXiv:2212.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='13138 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [103] Katta Spiel.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' ” Why are they all obsessed with Gender?”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='—(Non) binary Navigations through Technological Infrastructures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Designing Interactive Systems Conference 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 478–494.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [104] Angie Spoto and Natalia Oleynik.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Library of Mixed-Initiative Creative Interfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Retrieved 19-Jun-2021 from http://mici.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='codingconduct.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='cc/ [105] Catherine Stupp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Fraudsters Used AI to Mimic CEO’s Voice in Unusual Cybercrime Case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' The Wall Street Journal (August 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Retrieved 06-Jan-2023 from https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='wsj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='com/articles/fraudsters-use-ai-to-mimic-ceos-voice-in-unusual-cybercrime-case-11567157402 [106] Jiao Sun, Q Vera Liao, Michael Muller, Mayank Agarwal, Stephanie Houde, Kartik Talamadupula, and Justin D Weisz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Investigating Explainability of Generative AI for Code through Scenario-based Design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In 27th International Conference on Intelligent User Interfaces.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 212–228.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [107] Ilya Sutskever, Oriol Vinyals, and Quoc V Le.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Sequence to sequence learning with neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Advances in neural information processing systems 27 (2014).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [108] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Attention is all you need.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Advances in neural information processing systems 30 (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [109] James Vincent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Ai-generated answers temporarily banned on coding Q&A site stack overflow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Retrieved 06-Jan-2023 from https: //www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='theverge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='com/2022/12/5/23493932/chatgpt-ai-generated-answers-temporarily-banned-stack-overflow-llms-dangers [110] Patrick von Platen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' How to generate text: using different decoding methods for language generation with Transformers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Hugging Face Blog (March 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Retrieved 06-Jan-2023 from https://huggingface.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='co/blog/how-to-generate [111] Dakuo Wang, Justin D Weisz, Michael Muller, Parikshit Ram, Werner Geyer, Casey Dugan, Yla Tausczik, Horst Samulowitz, and Alexander Gray.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Human-AI collaboration in data science: Exploring data scientists’ perceptions of automated AI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Proceedings of the ACM on Human-Computer Interaction 3, CSCW (2019), 1–24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [112] Qiaosi Wang, Koustuv Saha, Eric Gregori, David Joyner, and Ashok Goel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Towards mutual theory of mind in human-ai interaction: How language reflects what students perceive about a virtual teaching assistant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 1–14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [113] Laura Weidinger, John Mellor, Maribeth Rauh, Conor Griffin, Jonathan Uesato, Po-Sen Huang, Myra Cheng, Mia Glaese, Borja Balle, Atoosa Kasirzadeh, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Ethical and social risks of harm from language models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' arXiv preprint arXiv:2112.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content='04359 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [114] Justin D Weisz, Mary Lou Maher, Hendrik Strobelt, Lydia B Chilton, David Bau, and Werner Geyer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' HAI-GEN 2022: 3rd Workshop on Human-AI Co-Creation with Generative Models.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In 27th International Conference on Intelligent User Interfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 4–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [115] Justin D Weisz, Michael Muller, Stephanie Houde, John Richards, Steven I Ross, Fernando Martinez, Mayank Agarwal, and Kartik Talamadupula.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Perfection Not Required?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Human-AI Partnerships in Code Translation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In 26th International Conference on Intelligent User Interfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 402–412.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [116] Justin D Weisz, Michael Muller, Steven I Ross, Fernando Martinez, Stephanie Houde, Mayank Agarwal, Kartik Talamadupula, and John T Richards.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Better together?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' an evaluation of ai-supported code translation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' In 27th International Conference on Intelligent User Interfaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 369–391.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [117] Alex Wilkins.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' Will AI text-to-image generators put illustrators out of a job?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' NewScientist (May 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' [118] Shuo Yang, Kai Shu, Suhang Wang, Renjie Gu, Fan Wu, and Huan Liu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/QNE5T4oBgHgl3EQfZA_G/content/2301.05578v1.pdf'} +page_content=' 2019.' 
diff --git a/R9E4T4oBgHgl3EQfLAxE/vector_store/index.pkl b/R9E4T4oBgHgl3EQfLAxE/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..521987c44bd9af0b8e44bdebb3e3ba8f1171528c --- /dev/null +++ b/R9E4T4oBgHgl3EQfLAxE/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8c315e36ec4237cc38e8a808ba90b4d18cf9a7f56ac96a78290ead9617dae5e +size 199264 diff --git a/RdA0T4oBgHgl3EQfDv9U/content/2301.02007v1.pdf b/RdA0T4oBgHgl3EQfDv9U/content/2301.02007v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d673fd7a4658bd6ba64c1833b36c68ca6b736e16 --- /dev/null +++ b/RdA0T4oBgHgl3EQfDv9U/content/2301.02007v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06f9bd654b9ce15583dda434ebc88261a06d56a996747d6d8b7f2f70ba5bd233 +size 686456 diff --git a/RdE3T4oBgHgl3EQfygvM/content/2301.04721v1.pdf b/RdE3T4oBgHgl3EQfygvM/content/2301.04721v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4be2b21124e6573a240bfedcd4bea4079b839e40 --- /dev/null +++ b/RdE3T4oBgHgl3EQfygvM/content/2301.04721v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62430fb9734cbb439f8371c04cf2e589e15a15079b409850fb61d49ded62b2ca +size 5519143 diff --git a/T9E4T4oBgHgl3EQfmg0h/content/2301.05168v1.pdf b/T9E4T4oBgHgl3EQfmg0h/content/2301.05168v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cc37fa8121243447912c52a5ef798d50e75b1670 --- /dev/null +++ b/T9E4T4oBgHgl3EQfmg0h/content/2301.05168v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97458f647026d16d2b4d82d42de40237275bc6e974670711cb3366ab4af4c030 +size 10913502 diff --git a/T9E4T4oBgHgl3EQfmg0h/vector_store/index.faiss b/T9E4T4oBgHgl3EQfmg0h/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..055ba09b5e23486fde1c058923f2b2a91b35f5ca --- /dev/null +++ b/T9E4T4oBgHgl3EQfmg0h/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d95923da751d865b7746fc25cff8d93e7447a8dcd84ff6fd305c83f6a09260d +size 4718637 diff --git a/UtAzT4oBgHgl3EQfX_xg/vector_store/index.faiss b/UtAzT4oBgHgl3EQfX_xg/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..864b2078d5931ee3edb7c57123ddd8a580f7781d --- /dev/null +++ b/UtAzT4oBgHgl3EQfX_xg/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d0a7d09154449418619f9dd8bb062e8253b149ecd65c557a9e25b92010f62c1 +size 4259885 diff
--git a/VtAyT4oBgHgl3EQfV_ek/vector_store/index.faiss b/VtAyT4oBgHgl3EQfV_ek/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..0ed58838b5da0d3ad130c74c5ab81d92e43f71df --- /dev/null +++ b/VtAyT4oBgHgl3EQfV_ek/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b28faa269a2c3648214d552fd1ad2755c0c9e41f74e43abe174e87cc8b672bd8 +size 3801133 diff --git a/W9AzT4oBgHgl3EQfmf0I/vector_store/index.pkl b/W9AzT4oBgHgl3EQfmf0I/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..0f5451d3059264322e01a0fef343fc197e70581a --- /dev/null +++ b/W9AzT4oBgHgl3EQfmf0I/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e11cc0cbcb492f122f9fa127e81609cfbb808227d2f355503a1acd892c9710b7 +size 169471 diff --git a/WNE2T4oBgHgl3EQfDgYF/content/2301.03624v1.pdf b/WNE2T4oBgHgl3EQfDgYF/content/2301.03624v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f0e8dd09840474205ec7de02a633464a573a9f98 --- /dev/null +++ b/WNE2T4oBgHgl3EQfDgYF/content/2301.03624v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0f37de21d6420fe74e386d1be6336b4850f2b2fd65ea456d51623f24045b50a +size 2957993 diff --git a/WNE2T4oBgHgl3EQfDgYF/vector_store/index.pkl b/WNE2T4oBgHgl3EQfDgYF/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..bcd8d8e19c56914a9844b98c408379af7aa812ea --- /dev/null +++ b/WNE2T4oBgHgl3EQfDgYF/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af390e9789e3922d3dd4a9b620f4fc7bf90668ef7d115a3f07122eac7fd96b5f +size 341677 diff --git a/WtFIT4oBgHgl3EQfiCtg/content/tmp_files/2301.11290v1.pdf.txt b/WtFIT4oBgHgl3EQfiCtg/content/tmp_files/2301.11290v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..f5d0354f5921d11acab9bc9451a2c4294128bd18 --- /dev/null +++ b/WtFIT4oBgHgl3EQfiCtg/content/tmp_files/2301.11290v1.pdf.txt @@ -0,0 +1,775 @@ +IEEE +1 +Graph Encoder Ensemble for Simultaneous +Vertex Embedding and Community Detection +Cencheng Shen, Youngser Park, Carey E. Priebe +Abstract—In this paper we propose a novel and computationally efficient method to simultaneously achieve vertex embedding, +community detection, and community size determination. By utilizing a normalized one-hot graph encoder and a new rank-based +cluster size measure, the proposed graph encoder ensemble algorithm achieves excellent numerical performance throughout a variety +of simulations and real data experiments. +Index Terms—Graph Embedding, Vertex Clustering, L2 Normalization +! +1 +INTRODUCTION +G +RAPH data consists of a collection of vertices and edges +representing the pairwise relationship. Given n ver- +tices and s edges, a graph (or network) can be represented +by an n × n adjacency matrix A where A(i, j) is the edge +weight between the ith vertex and jth vertex. In practice, it +is often stored by an s × 3 edgelist E, where the first two +columns store the vertex indices of each edge and the last +column is the edge weight. +Community detection [1], [2], [3], [4] is a fundamental +problem for graph data. Often, the vertices can be nat- +urally separated into several communities, where within- +community vertices are more connected than between- +community vertices. This problem is also called vertex clus- +tering or graph partition, and many algorithms have been +proposed on this end. 
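To make the two storage formats above concrete, the following is a small NumPy sketch written only for illustration here (it is not part of the paper or of the GraphEmd codebase), converting an s x 3 edgelist into an n x n adjacency matrix and back; the function names are hypothetical.

import numpy as np

def edgelist_to_adjacency(E, n):
    """Turn an s x 3 edgelist with rows (i, j, weight) into an n x n adjacency
    matrix, assuming an undirected graph so each edge is mirrored."""
    A = np.zeros((n, n))
    for i, j, w in E:
        A[int(i), int(j)] = w
        A[int(j), int(i)] = w
    return A

def adjacency_to_edgelist(A):
    """Recover the s x 3 edgelist from the nonzero upper-triangular entries of A."""
    rows, cols = np.nonzero(np.triu(A))
    return np.column_stack([rows, cols, A[rows, cols]])

# Tiny example: a 3-vertex path graph stored both ways.
E = np.array([[0, 1, 1.0], [1, 2, 1.0]])
A = edgelist_to_adjacency(E, n=3)
assert np.allclose(adjacency_to_edgelist(A), E)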
Among these algorithms, embedding-based methods such as adjacency or Laplacian spectral clustering are provably consistent under popular random graph models [5], [6], as are likelihood-based techniques [4], [7]; however, these methods are typically slow and do not scale to large graphs. On the other hand, modularity-based methods like Louvain and Leiden are much faster and thus more popular in practice [8], [9]; however, they have received relatively little theoretical investigation, and their output consists only of community labels, with no vertex embedding. Moreover, determining the community size is an important question in its own right, but it has mostly been handled ad hoc or simply assumed known.

In this paper we design a graph encoder ensemble to simultaneously achieve graph embedding, community detection, and community-size determination. The ensemble algorithm utilizes a normalized one-hot graph encoder [10], ensemble learning [11], [12], k-means clustering [13], [14], and a novel rank-based cluster size measure called the minimal rank index to determine the best ensemble and the best community size. The proposed algorithm has a linear running time, is scalable to big graphs, and exhibits excellent numerical performance throughout our experiments. The code is available on GitHub at https://github.com/cshen6/GraphEmd.

• Cencheng Shen is with the Department of Applied Economics and Statistics, University of Delaware. E-mail: shenc@udel.edu
• Carey E. Priebe and Youngser Park are with the Department of Applied Mathematics and Statistics (AMS), the Center for Imaging Science (CIS), and the Mathematical Institute for Data Science (MINDS), Johns Hopkins University. E-mail: cep@jhu.edu, youngser@jhu.edu
This work was supported in part by the National Science Foundation HDR TRIPODS 1934979, the National Science Foundation DMS-2113099, and by funding from Microsoft Research.
arXiv:2301.11290v1 [cs.SI] 18 Jan 2023

2 GRAPH ENCODER ENSEMBLE
To better present the main algorithm, we introduce several auxiliary functions. Given an s × 3 edgelist E with n vertices and a label vector Y ∈ R^n of k communities, we denote the one-hot graph encoder embedding as
Z = one-hot-emb(E, Y),
where Z ∈ R^{n×k} provides a k-dimensional representation for each vertex (see [10] for more details). The L2 normalization step is denoted as Z = normalize(Z), which normalizes each vertex representation to unit norm (see Section 2.2 for details). Moreover, given any embedding Z and a label vector Y, we denote the minimal rank index as MRI(Z, Y) ∈ [0, 1], which measures the clustering quality; the smaller the value, the better (details in Section 2.3). We also denote k-means clustering as k-means(Z, K), and the adjusted rand index between two label vectors of the same size as ARI(Y, Y2). The ARI is a popular matching metric that lies in (−∞, 1], with larger positive values implying better agreement and 1 meaning a perfect match [15].

In the following, we first present the main algorithm. The major innovations in the proposed algorithm, i.e., the L2 normalization, the minimal rank index, and the ensemble embedding, are further explained in the remaining subsections.
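Before presenting the full algorithm, a minimal sketch of the two auxiliary embedding functions may help fix ideas. This is our own illustration rather than the reference implementation in the GraphEmd repository; in particular, the internal construction of one-hot-emb (accumulating, for each vertex, its edge weights into the columns of its neighbors' communities, scaled by the community sizes) follows our reading of [10] and should be treated as an assumption.

import numpy as np

def one_hot_emb(E, Y, n, k):
    """Sketch of Z = one-hot-emb(E, Y) for an s x 3 edgelist E and integer
    labels Y in {0, ..., k-1}; assumes each undirected edge appears once in E.
    The cost is O(nk + s), matching the complexity analysis in Section 2.5."""
    counts = np.bincount(Y, minlength=k).astype(float)
    counts[counts == 0] = 1.0            # guard against empty communities
    Z = np.zeros((n, k))
    for i, j, w in E:
        i, j = int(i), int(j)
        Z[i, Y[j]] += w / counts[Y[j]]   # vertex i sees an edge into community Y[j]
        if i != j:
            Z[j, Y[i]] += w / counts[Y[i]]
    return Z

def normalize(Z):
    """L2-normalize each row (vertex representation) of Z to unit norm."""
    norms = np.linalg.norm(Z, axis=1, keepdims=True)
    norms[norms == 0] = 1.0              # leave isolated vertices at the origin
    return Z / norms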
https://github.com/cshen6/GraphEmd +arXiv:2301.11290v1 [cs.SI] 18 Jan 2023 + +IEEE +2 +maximum iteration m = 20, and the clustering range shall +be determined by the experiment under-consideration. +By design, the proposed algorithm achieves simultane- +ous vertex embedding, community detection, and cluster +size determination. Note that the unsupervised graph en- +coder embedding in [10] can be viewed as a special case of +the graph encoder ensemble at |R| = 1 (the clustering range +is a singleton) and r = 1. +Algorithm 1 Graph Encoder Ensemble +Require: An edgelist E, a range of potential cluster size R, +number of random replicates r, and number of maximum +iteration m. +Ensure: The graph embedding Z ∈ Rn× ˆ +K for all vertices, +the estimated number of clusters ˆK, the cluster indices +Y ∈ Rn, and the minimal rank index ind ∈ [0, 1]. +function GRAPH-ENCODER-ENSEMBLE(E, R, r, m) +ind = 1; ▷ initialize the index to pick best cluster size +for k ∈ R do +ind2 = 1; +▷ initialize the index to pick best +random replicate +for i = 1, . . . , r do +ˆYk = rand(k, n); ▷ randomly initialize a label +vector of length n in [k] +for j = 1, . . . , m do +ˆZk = one-hot-emb(E, ˆYk); +ˆZk = normalize(ˆZk); +ˆY +′ +k = k-means(ˆZk, k); +if ARI( ˆYk, ˆY +′ +k)==1 then break; +else ˆYk = ˆY +′ +k; +end if +end for +ˆZk = one-hot-emb(E, ˆYk); +ˆZk = normalize(ˆZk); +ind3 = MRI([ˆZk, ˆYk]); +if ind3 < ind2 then +ˆZ = ˆZk; ˆY = ˆYk; ind2 = ind3; +end if +end for +if ind2 ≤ ind then +Z = ˆZ; Y = ˆY; ˆK = k; ind = ind2; +end if +end for +end function +2.2 +Why Normalization +The normalization step normalize() scales each vertex em- +bedding to unit-norm in Algorithm 1, i.e., for each i, do +Z(i, :) = Z(i, :)/∥Z(i, :)∥2. +Figure 2 illustrates the difference between un-normalized +and +normalized +embedding +for +a +sparse +and +two- +community random graph model. The normalized embed- +ding is on a unit-circle, extracts the connectivity information +and omits the vertex degree, thus better for clustering when +the community information is solely determined by vertex +connectivity (which translates to the block probability in the +stochastic block model). Alternatively, using the normalized +embedding + k-means clustering is equivalent to using un- +normalized embedding + angle / cosine / spherical k- +means clustering. +The distinction here closely resembles the two-truth phe- +nomenon between graph adjacency and graph Laplacian +[16], i.e., the Laplacian spectral embedding (LSE) can be +viewed as a degree-normalized version of adjacency spectral +embedding (ASE), and typically performs better on sparse +graphs. See Section 3.2 and Table 1 for more numerical +evaluations on the normalization effect. +2.3 +The Minimal Rank Index +To measure the clustering quality, we tailored a new rank- +based measure called the minimal rank index (MRI). It is a +key metric in Algorithm 1 to compare multiple embeddings +from different initializations and different community sizes. +Let Yi denote the cluster index of vertex i, d(·, ·) denotes +the Euclidean distance, and µk denote the mean of kth +cluster, i.e., µk = +� +i=1,...,n,Yi=k +Zi. The minimal rank index +is computed as +MRI = +� +i=1,...,n +I{arg +min +k=1,...,K d(Zi, µk) ̸= Yi}/n ∈ [0, 1]. +(1) +Namely, it measures how often the vertex embedding is +not closest to its cluster mean. A smaller value suggests +better clustering quality, and MRI equals 0 means every +vertex is closest to its cluster mean. 
2.2 Why Normalization

The normalization step normalize() in Algorithm 1 scales each vertex embedding to unit norm, i.e., for each i,

Z(i, :) = Z(i, :) / \|Z(i, :)\|_2.

Figure 2 illustrates the difference between the un-normalized and the normalized embedding for a sparse, two-community random graph model. The normalized embedding lies on the unit circle, extracts the connectivity information, and omits the vertex degree; it is therefore better suited for clustering when the community information is determined solely by the vertex connectivity (which translates to the block probability in the stochastic block model). Equivalently, using the normalized embedding with k-means clustering amounts to using the un-normalized embedding with angle / cosine / spherical k-means clustering.

The distinction here closely resembles the two-truth phenomenon between the graph adjacency and the graph Laplacian [16]: the Laplacian spectral embedding (LSE) can be viewed as a degree-normalized version of the adjacency spectral embedding (ASE) and typically performs better on sparse graphs. See Section 3.2 and Table 1 for more numerical evaluations of the normalization effect.

2.3 The Minimal Rank Index

To measure the clustering quality, we tailor a new rank-based measure called the minimal rank index (MRI). It is the key metric in Algorithm 1 for comparing multiple embeddings obtained from different initializations and different community sizes. Let Y_i denote the cluster index of vertex i, d(·, ·) the Euclidean distance, and \mu_k the mean of the kth cluster, i.e., \mu_k = \frac{1}{n_k} \sum_{i: Y_i = k} Z_i with n_k the number of vertices in cluster k. The minimal rank index is computed as

\mathrm{MRI} = \frac{1}{n} \sum_{i=1}^{n} I\{\arg\min_{k=1,\ldots,K} d(Z_i, \mu_k) \neq Y_i\} \in [0, 1].   (1)

Namely, it measures how often a vertex embedding is not closest to its own cluster mean. A smaller value suggests better clustering quality, and MRI = 0 means every vertex is closest to its cluster mean. In the context of k-means clustering, MRI is non-zero when k-means does not converge.

Compared to common cluster-size measures such as the Silhouette Score, the Davies–Bouldin index, the Variance Ratio Criterion, and the Gap criterion [17], [18], MRI is rank-based, whereas the others are based on actual distances, i.e., on a ratio of within-cluster to between-cluster distance. If MRI is replaced by any of those criteria in Algorithm 1, the cluster-size choice becomes biased towards the smallest possible size. This is due to the unique incremental-dimension nature of the graph encoder embedding: the embedding dimension in Algorithm 1 equals k, the community size, which makes the within-cluster distance smaller for smaller k, so any cluster-size measure based on actual distances is biased towards the smallest k. MRI, being rank-based, is not susceptible to this issue and is therefore robust against varying dimensions. Section 3.4 and Figure 3 demonstrate this phenomenon via simulations.

Fig. 1. Illustrate the graph encoder ensemble algorithm by a simple example (panels: the input graph, the candidate cluster sizes, the random replicates with their MRI, the optimal ensemble, and the output vertex embedding, community size, and cluster indices).
Fig. 2. The normalization effect: the left panel shows the adjacency heatmap of a sparse graph simulated from Simulation 1 in Section 3.1; the center panel is the resulting embedding without the normalization step (ARI = 0.22); the right panel is the resulting embedding with normalization (ARI = 0.97). The blue and red dots represent the true community of each vertex. The normalization clearly helps k-means clustering, which yields a significantly better ARI against the ground-truth community labels.

2.4 Ensemble Embedding and Cluster Size Determination

By using a set of different models, ensemble learning is known to improve the learning performance and reduce the variance. It is employed in Algorithm 1 as follows: for each k in the cluster range, we compute a set of vertex embeddings and community labels based on random label initializations, then choose the best one with the smallest MRI. If multiple models attain the smallest MRI, we take the average embedding.

Next, among all possible cluster choices k, we again choose the embedding with the smallest MRI. In case of multiple embeddings with the smallest MRI, we use the embedding with the largest k. For example, suppose the MRI is 0, 0, 0, 0.1, 0.2 for K = 2, 3, 4, 5, 6; then the graph encoder ensemble chooses K̂ = 4.

In the context of Algorithm 1, the ensemble embedding successfully mitigates potentially bad initializations and significantly reduces the estimation variance. Section 3.3 and Table 2 demonstrate its numerical advantage.
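To make equation (1) and the size-selection rule concrete, here is a small NumPy sketch of the minimal rank index and of the selection rule (smallest MRI, ties broken by the largest k); the helper names are ours, and the cluster means are computed exactly as in the definition above.

```python
import numpy as np

def mri(Z, Y):
    """Minimal rank index, eq. (1): the fraction of vertices whose embedding
    is not closest (in Euclidean distance) to its own cluster mean."""
    labels = np.unique(Y)
    means = np.vstack([Z[Y == c].mean(axis=0) for c in labels])
    dists = np.linalg.norm(Z[:, None, :] - means[None, :, :], axis=2)  # n x K
    closest = labels[np.argmin(dists, axis=1)]
    return float(np.mean(closest != Y))

def choose_size(mri_values, sizes):
    """Section 2.4 rule: smallest MRI, and the largest k among ties.
    choose_size([0, 0, 0, 0.1, 0.2], [2, 3, 4, 5, 6]) returns 4."""
    mri_values = np.asarray(mri_values, dtype=float)
    ties = np.flatnonzero(mri_values == mri_values.min())
    return sizes[ties[-1]]               # assumes sizes are listed in increasing order
```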
2.5 Computational Complexity Analysis

Algorithm 1 consists of the one-hot graph encoder embedding, k-means clustering, the MRI computation, and the ensemble steps. Denote by n the number of vertices and by s the number of edges. At any fixed k, the one-hot graph encoder embedding takes O(nk + s), k-means takes O(nk), and the MRI computation takes O(nk). Therefore, Algorithm 1 runs in O(rm(n max(R) + s)), i.e., linear with respect to the number of vertices and edges. Similarly, the storage requirement is just O(n max(R) + s). When tested on simulated graphs using the default parameters and max(R) = 10, the graph encoder ensemble takes less than 3 minutes to process 1 million edges and less than 20 minutes for 10 million edges, which is extremely fast and scalable.

For even larger graphs, the loops in Algorithm 1 can easily be parallelized for a further time reduction: the embedding and the MRI can be computed in parallel for each replicate and each cluster size, before computing the optimal MRI and outputting the final results. This reduces the running time to O(n max(R) + s), the same as the storage requirement.

3 EXPERIMENTS

In this section we carry out comprehensive numerical experiments to showcase the advantage of the graph encoder ensemble, as well as the individual benefits of the normalization, the ensemble, and MRI. The benchmarks are the same algorithm without normalization, without the ensemble, and with MRI replaced, as well as the adjacency / Laplacian spectral embedding. We use the ARI to measure the agreement between the ground-truth labels and the estimated communities.

3.1 Simulation Set-up

The stochastic block model (SBM) is arguably the most fundamental community-based random graph model [19], [20]. Each vertex i is associated with a class label Y_i ∈ {1, ..., K}. The class label may be fixed a priori, or generated by a categorical distribution with prior probabilities \{\pi_k \in (0, 1) : \sum_{k=1}^{K} \pi_k = 1\}. A block probability matrix B = [B(k, l)] ∈ [0, 1]^{K×K} then specifies the edge probability between a vertex from class k and a vertex from class l: for any i < j,

A(i, j) \overset{i.i.d.}{\sim} \mathrm{Bernoulli}(B(Y_i, Y_j)), \quad A(i, i) = 0, \quad A(j, i) = A(i, j).

The degree-corrected stochastic block model (DC-SBM) [3] is a generalization of the SBM to better model the sparsity of real graphs. With everything else the same as in the SBM, each vertex i has an additional degree parameter \theta_i, and the adjacency matrix is generated by

A(i, j) \sim \mathrm{Bernoulli}(\theta_i \theta_j B(Y_i, Y_j)).

For our simulations, we consider the following four DC-SBM models with increasing community size. In all four models, we use the same degree distribution \theta_i \overset{i.i.d.}{\sim} \mathrm{Beta}(1, 4).

Simulation 1: n = 3000, K = 2, Y_i ∈ {1, 2} equally likely, and the block probability matrix is
B = \begin{pmatrix} 0.5 & 0.1 \\ 0.1 & 0.5 \end{pmatrix}.

Simulation 2: n = 5000, K = 3, Y_i ∈ {1, 2, 3} with prior probabilities [0.2, 0.3, 0.5], and the block probability matrix is
B = \begin{pmatrix} 0.9 & 0.1 & 0.1 \\ 0.1 & 0.5 & 0.1 \\ 0.1 & 0.1 & 0.2 \end{pmatrix}.

Simulation 3: n = 3000, K = 4, Y_i ∈ {1, 2, 3, 4} with prior probabilities [0.2, 0.2, 0.3, 0.3], and the block probability matrix is
B = \begin{pmatrix} 0.9 & 0.1 & 0.1 & 0.1 \\ 0.1 & 0.7 & 0.1 & 0.1 \\ 0.1 & 0.1 & 0.5 & 0.1 \\ 0.1 & 0.1 & 0.1 & 0.3 \end{pmatrix}.

Simulation 4: n = 3000, K = 5, Y_i with equally likely prior probabilities, and the block probability matrix satisfies B(i, i) = 0.2 and B(i, j) = 0.1 for all i = 1, ..., 5 and j ≠ i.
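For reference, a DC-SBM graph as specified above could be sampled as follows. This quadratic-time sampler follows the Bernoulli formulation directly and uses the edgelist format of the earlier sketches; the function name and the commented Simulation 1 example are illustrative assumptions.

```python
import numpy as np

def sample_dcsbm(n, B, priors, seed=None):
    """Sample an undirected DC-SBM graph: returns an (s, 3) edgelist E and labels Y.
    B: K x K block probability matrix; priors: class prior probabilities;
    theta_i ~ Beta(1, 4) i.i.d., as in Section 3.1."""
    rng = np.random.default_rng(seed)
    K = len(priors)
    Y = rng.choice(K, size=n, p=priors)       # class labels
    theta = rng.beta(1, 4, size=n)            # degree-correction parameters
    edges = []
    for i in range(n):
        for j in range(i + 1, n):             # i < j: undirected, no self-loops
            if rng.random() < theta[i] * theta[j] * B[Y[i], Y[j]]:
                edges.append((i, j, 1.0))
    return np.array(edges), Y

# Simulation 1 (n = 3000, K = 2, equal priors):
# B1 = np.array([[0.5, 0.1], [0.1, 0.5]])
# E, Y = sample_dcsbm(3000, B1, [0.5, 0.5], seed=0)
```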
3.2 Normalization Comparison

Table 1 clearly shows that the clustering ARI of the normalized algorithm far exceeds that of the un-normalized algorithm. To exclude other factors from this comparison, we simply use r = 1 and assume a known cluster size. As expected, the same phenomenon also occurs between ASE and LSE, since LSE is likewise a normalized version of ASE.

ARI            GEE    GEE (no norm)   ASE    LSE
Simulation 1   0.91   0.10            0.23   0.91
Simulation 2   0.71   0.17            0.27   0.75
Simulation 3   0.73   0.08            0.12   0.65
Simulation 4   0.78   0.06            0.17   0.78

TABLE 1: Evaluation of the normalization effect in the graph encoder ensemble.

3.3 Ensemble Comparison

In this simulation we continue to assume a known cluster size, carry out 100 Monte-Carlo replicates, and report the ARI of the ensemble embedding (r = 10) and of the no-ensemble embedding (r = 1). Table 2 shows that the ensemble algorithm clearly outperforms the no-ensemble version: the mean ARI is improved and the variance is significantly reduced.

Empirically, the default choice of r = 10 worked sufficiently well throughout our experiments, and we do not observe any significant gain for larger r. Moreover, if the graph size is sufficiently large and the community structure is sufficiently separable, a smaller r or even r = 1 suffices, which is the case for Simulation 1 in Table 2.

Average ARI ± std   GEE           GEE (r = 1)
Simulation 1        0.91 ± 0.01   0.91 ± 0.01
Simulation 2        0.81 ± 0.01   0.71 ± 0.16
Simulation 3        0.79 ± 0.02   0.72 ± 0.09
Simulation 4        0.89 ± 0.01   0.79 ± 0.12

TABLE 2: Evaluation of the ensemble advantage in the graph encoder ensemble. The mean and standard deviation of the ARI over 100 Monte-Carlo replicates are reported.

3.4 Cluster Size Estimation

Here we investigate how well the algorithm estimates the community size. Instead of using the ground-truth size, we let R = {2, 3, ..., 10} and report the results in Figure 3. The left panel shows the accuracy of the community-size estimation as the sample size increases: as n grows, the graph encoder ensemble achieves better and better estimation accuracy, which eventually reaches 1. Since MRI is the key to the size determination, the center panel shows the average MRI for Simulation 4 (true K = 5): the ensemble algorithm accurately estimates the truth, because the largest community size that minimizes the MRI is indeed 5. The right panel shows the ensemble algorithm with MRI replaced by the Silhouette Score (the larger the better): the Silhouette Score biases towards the smallest cluster size and forces the algorithm to choose two communities instead. This phenomenon is consistent throughout all our simulations and experiments, as well as for other size measures like the DB-index or the variance ratio.
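A Monte-Carlo evaluation in the spirit of Sections 3.3 and 3.4 could be scripted as below, assuming the earlier sketches sample_dcsbm and graph_encoder_ensemble are in scope; the replicate count and seeds are illustrative and smaller than the 100 replicates used in the paper.

```python
import numpy as np
from sklearn.metrics import adjusted_rand_score

def evaluate(B, priors, n, R, r, n_rep=10, seed=0):
    """Mean and std of the ARI, plus the chosen community sizes, over replicates."""
    aris, ks = [], []
    for rep in range(n_rep):
        E, Y_true = sample_dcsbm(n, B, priors, seed=seed + rep)
        Z, K_hat, Y_hat, ind = graph_encoder_ensemble(E, n, R, r=r)
        aris.append(adjusted_rand_score(Y_true, Y_hat))
        ks.append(K_hat)
    return float(np.mean(aris)), float(np.std(aris)), ks

# Ensemble (r = 10) versus no ensemble (r = 1) at the true size, as in Table 2:
# B1 = np.array([[0.5, 0.1], [0.1, 0.5]])
# print(evaluate(B1, [0.5, 0.5], n=3000, R=[2], r=10))
# print(evaluate(B1, [0.5, 0.5], n=3000, R=[2], r=1))
```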
4 REAL DATA

In this section we consider the following real data from the network repository² [21] and the Stanford network data³: Cora Citations (2708 vertices, 5429 edges, 7 classes), Industry Partnerships (219 vertices, 630 edges, 3 classes), the EU Email Network [22] (1005 vertices, 25571 edges, 42 classes), and Political Blogs [23] (1490 vertices, 33433 edges, 2 classes).

2. https://networkrepository.com/index.php
3. https://snap.stanford.edu/

The benchmarks are LSE assuming the true K, the graph encoder ensemble assuming the true K, the graph encoder ensemble estimating K via MRI, and the graph encoder ensemble estimating K via the Silhouette Score. The resulting ARI scores are reported in Table 3: the ensemble algorithm using the known K is the best performer; and in the case of unknown K, although it does not pick the ground-truth community size, the performance is still relatively good.

                  True K   GEE    LSE    R       GEE using K̂   GEE using K̂_SS
Cora Citations    7        0.11   0.08   2-20    0.07 (3)       0.03 (2)
Emails            42       0.48   0.23   10-50   0.39 (14)      0.28 (10)
Industry          3        0.13   0.13   2-10    0.08 (5)       0.07 (2)
Political blogs   2        0.80   0.80   2-10    0.69 (3)       0.80 (2)

TABLE 3: Real data experiments. The ARI is reported for each method; for the unknown-K columns, the estimated community size is shown in parentheses.

However, even when the real data comes with a ground truth, it is often debatable whether that ground truth truly reflects the nature of the connectivity. Take the political blog data as an example: the blogs were manually separated into Republican and Democratic blogs, but naturally the swing voters matter the most. Interestingly, when not assuming the true K = 2, the graph encoder ensemble picks three communities from the data. Figure 4 visualizes the difference: the left panel shows the encoder embedding using the ground-truth labels at K = 2, while the right panel shows the ensemble embedding estimating 3 communities. Surprisingly, the encoder ensemble did an excellent job of identifying neutral / swing blogs, providing valuable insights not available from the ground-truth labels.

Fig. 3. Demonstrate the cluster size estimation in the graph encoder ensemble. For each simulation and each graph size, we independently generate 100 graphs and run the ensemble algorithm to estimate the community size. The left panel shows the estimation accuracy as the graph size grows, i.e., how often the algorithm chooses the correct community size; as the graph size increases, the estimation accuracy gradually increases to 1 for all simulations. The center panel shows the average MRI at n = 5000 for Simulation 4, where K̂ = 5 is the estimated size and also the ground-truth size. The right panel shows the average Silhouette Score, where K̂_SS = 2 would be the choice.

Fig. 4. Visualize the vertex embedding and community structure for the political blogs. Different colors represent different communities. The left panel is the vertex embedding using the ground-truth size K = 2, while the right panel shows the ensemble embedding estimating 3 communities.

5 CONCLUSION

In this paper we proposed the graph encoder ensemble, which simultaneously achieves graph embedding, community detection, and community-size determination. It is easy to implement, computationally efficient, and performs well on sparse graphs and with unknown community size throughout the experiments. There are several interesting topics to pursue in future work, such as a mathematical proof of the asymptotic clustering optimality under the stochastic block model, further investigation of the theoretical and numerical properties of MRI, algorithmic improvements for multi-level community detection, and applications to complicated network data such as dynamic and multi-modal graphs.
REFERENCES

[1] M. Girvan and M. E. J. Newman, "Community structure in social and biological networks," Proceedings of the National Academy of Sciences, vol. 99, no. 12, pp. 7821–7826, 2002.
[2] B. Karrer and M. E. J. Newman, "Stochastic blockmodels and community structure in networks," Physical Review E, vol. 83, p. 016107, 2011.
[3] Y. Zhao, E. Levina, and J. Zhu, "Consistency of community detection in networks under degree-corrected stochastic block models," Annals of Statistics, vol. 40, no. 4, pp. 2266–2292, 2012.
[4] E. Abbe, "Community detection and stochastic block models: Recent developments," Journal of Machine Learning Research, vol. 18, no. 177, pp. 1–86, 2018.
[5] K. Rohe, S. Chatterjee, and B. Yu, "Spectral clustering and the high-dimensional stochastic blockmodel," Annals of Statistics, vol. 39, no. 4, pp. 1878–1915, 2011.
[6] D. Sussman, M. Tang, D. Fishkind, and C. Priebe, "A consistent adjacency spectral embedding for stochastic blockmodel graphs," Journal of the American Statistical Association, vol. 107, no. 499, pp. 1119–1128, 2012.
[7] C. Gao, Z. Ma, A. Y. Zhang, and H. H. Zhou, "Community detection in degree-corrected block models," Annals of Statistics, vol. 46, no. 5, pp. 2153–2185, 2018.
[8] V. D. Blondel, J. L. Guillaume, R. Lambiotte, and E. Lefebvre, "Fast unfolding of communities in large networks," Journal of Statistical Mechanics: Theory and Experiment, vol. 2008, p. P10008, 2008.
[9] V. A. Traag, L. Waltman, and N. J. van Eck, "From Louvain to Leiden: guaranteeing well-connected communities," Scientific Reports, vol. 9, p. 5233, 2019.
[10] C. Shen, Q. Wang, and C. E. Priebe, "One-hot graph encoder embedding," IEEE Transactions on Pattern Analysis and Machine Intelligence, accepted, 2023.
[11] R. Maclin and D. Opitz, "Popular ensemble methods: An empirical study," Journal of Artificial Intelligence Research, vol. 11, pp. 169–198, 1999.
[12] L. Breiman, "Random forests," Machine Learning, vol. 45, no. 1, pp. 5–32, 2001.
[13] S. P. Lloyd, "Least squares quantization in PCM," IEEE Transactions on Information Theory, vol. 28, no. 2, pp. 129–137, 1982.
[14] E. W. Forgy, "Cluster analysis of multivariate data: efficiency versus interpretability of classifications," Biometrics, vol. 21, no. 3, pp. 768–769, 1965.
[15] W. M. Rand, "Objective criteria for the evaluation of clustering methods," Journal of the American Statistical Association, vol. 66, no. 336, pp. 846–850, 1971.
[16] C. Priebe, Y. Park, J. Vogelstein, J. Conroy, V. Lyzinski, M. Tang, A. Athreya, J. Cape, and E. Bridgeford, "On a 'two truths' phenomenon in spectral graph clustering," Proceedings of the National Academy of Sciences, vol. 116, no. 13, pp. 5995–6000, 2019.
[17] P. J. Rousseeuw, "Silhouettes: a graphical aid to the interpretation and validation of cluster analysis," Journal of Computational and Applied Mathematics, vol. 20, pp. 53–65, 1987.
[18] D. L. Davies and D. W. Bouldin, "A cluster separation measure," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 1, no. 2, pp. 224–227, 1979.
[19] P. Holland, K. Laskey, and S. Leinhardt, "Stochastic blockmodels: First steps," Social Networks, vol. 5, no. 2, pp. 109–137, 1983.
[20] T. Snijders and K. Nowicki, "Estimation and prediction for stochastic blockmodels for graphs with latent block structure," Journal of Classification, vol. 14, no. 1, pp. 75–100, 1997.
[21] R. A. Rossi and N. K. Ahmed, "The network data repository with interactive graph analytics and visualization," in AAAI, 2015. [Online]. Available: https://networkrepository.com
[22] H. Yin, A. R. Benson, J. Leskovec, and D. F. Gleich, "Local higher-order graph clustering," in Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2017, pp. 555–564.
[23] L. Adamic and N. Glance, "The political blogosphere and the 2004 US election: Divided they blog," in Proceedings of the 3rd International Workshop on Link Discovery. New York: ACM Press, 2005, pp. 36–43.
Cencheng Shen received the BS degree in Quantitative Finance from the National University of Singapore in 2010, and the PhD degree in Applied Mathematics and Statistics from Johns Hopkins University in 2015. He is an assistant professor in the Department of Applied Economics and Statistics at the University of Delaware. His research interests include graph inference, hypothesis testing, and correlation and dependence.

Youngser Park received the B.E. degree in electrical engineering from Inha University in Seoul, Korea in 1985, and the M.S. and Ph.D. degrees in computer science from The George Washington University in 1991 and 2011, respectively. From 1998 to 2000 he worked at the Johns Hopkins Medical Institutions as a senior research engineer. From 2003 until 2011 he worked as a senior research analyst, and he has been an associate research scientist since 2011 and a research scientist since 2019 in the Center for Imaging Science at Johns Hopkins University. At Johns Hopkins, he holds joint appointments in the Institute for Computational Medicine and the Human Language Technology Center of Excellence. His current research interests are clustering algorithms, pattern classification, and data mining for high-dimensional and graph data.

Carey E. Priebe received the BS degree in mathematics from Purdue University in 1984, the MS degree in computer science from San Diego State University in 1988, and the PhD degree in information technology (computational statistics) from George Mason University in 1993. From 1985 to 1994 he worked as a mathematician and scientist in the US Navy research and development laboratory system. Since 1994 he has been a professor in the Department of Applied Mathematics and Statistics at Johns Hopkins University. His research interests include computational statistics, kernel and mixture estimates, statistical pattern recognition, model selection, and statistical inference for high-dimensional and graph data. He is a Senior Member of the IEEE, an Elected Member of the International Statistical Institute, a Fellow of the Institute of Mathematical Statistics, and a Fellow of the American Statistical Association.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Index Terms—Graph Embedding, Vertex Clustering, L2 Normalization !' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 1 INTRODUCTION G RAPH data consists of a collection of vertices and edges representing the pairwise relationship.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Given n ver- tices and s edges, a graph (or network) can be represented by an n × n adjacency matrix A where A(i, j) is the edge weight between the ith vertex and jth vertex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' In practice, it is often stored by an s × 3 edgelist E, where the first two columns store the vertex indices of each edge and the last column is the edge weight.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Community detection [1], [2], [3], [4] is a fundamental problem for graph data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Often, the vertices can be nat- urally separated into several communities, where within- community vertices are more connected than between- community vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' This problem is also called vertex clus- tering or graph partition, and many algorithms have been proposed on this end.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' For example, embedding-based methods like adjacency or Laplacian spectral clustering are provably consistent under popular random graph models [5], [6], as well as likelihood-based techniques [4], [7] — however, they are typically slow and not scalable to large graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' On the other hand, modularity-based methods like Louvain and Leiden are much faster and thus more popular in practice [8], [9] — however, there is relatively little theoretical investigation, and the output only has the community labels and no vertex embedding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Moreover, determining the community size is an important question on its own, but has mostly been ad- hoc or simply assumed known.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' In this paper we design a graph encoder ensemble to simultaneously achieve graph embedding, community de- tection, and community-size determination.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The ensemble algorithm utilizes a normalized one-hot graph encoder [10], ensemble learning [11], [12], k-means clustering [13], [14], Cencheng Shen is with the Department of Applied Economics and Statis- tics, University of Delaware.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' E-mail: shenc@udel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='edu Carey E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='Priebe and Youngser Park are with the Department of Applied Mathematics and Statistics (AMS), the Center for Imaging Science (CIS), and the Mathematical Institute for Data Science (MINDS), Johns Hopkins University.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' E-mail: cep@jhu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='edu, youngser@jhu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='edu This work was supported in part by the National Science Foundation HDR TRIPODS 1934979, the National Science Foundation DMS-2113099, and by funding from Microsoft Research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' and a novel rank-based cluster size measure called minimal rank index to determine the best ensemble and the best community size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The proposed algorithm has a linear run- ning time, is scalable to big graphs, and exhibits excellent numerical performance throughout our experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The code is made available on Github1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 2 GRAPH ENCODER ENSEMBLE To better present the main algorithm, we introduce several auxiliary functions as follows: given an s×3 edgelist E with n vertices and a label vector Y ∈ Rn of k communities, we denote the one-hot graph encoder embedding as Z = one-hot-emb(E, Y), where Z ∈ Rn×k provides a k-dimensional representation for each vertex (see [10] for more details).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Then the L2 normalization step is denoted as Z = normalize(Z), which normalizes each vertex representation to unit norm (see Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='2 for details).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Moreover, given any embedding Z and a label vector Y, we denote the minimal rank index as MRI(Z, Y) ∈ [0, 1], which measures the clustering quality and the smaller the better (details in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='3).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' We also denote the k-means clustering as k-means(Z, K), and the adjusted rand index as ARI(Y, Y2) between two label vectors of the same size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The ARI is a popular matching metric lies in (−∞, 1], with larger positive number implying better matchedness and 1 means perfect match [15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' In the following, we first present the main algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The major innovations in the proposed algorithm, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=', L2 normalization, the minimal rank index, and ensemble em- bedding, are further explained in the remaining subsections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='1 Methodology The method is detailed in Algorithm 1 and visualized in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' It is applicable to any binary or weighted graph, directed or undirected graph with or without self-loop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' For the parameter choice, throughout this paper we set the number of random replicates r = 10, the number of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='com/cshen6/GraphEmd arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='11290v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='SI] 18 Jan 2023 IEEE 2 maximum iteration m = 20, and the clustering range shall be determined by the experiment under-consideration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' By design, the proposed algorithm achieves simultane- ous vertex embedding, community detection, and cluster size determination.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Note that the unsupervised graph en- coder embedding in [10] can be viewed as a special case of the graph encoder ensemble at |R| = 1 (the clustering range is a singleton) and r = 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Algorithm 1 Graph Encoder Ensemble Require: An edgelist E, a range of potential cluster size R, number of random replicates r, and number of maximum iteration m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Ensure: The graph embedding Z ∈ Rn× ˆ K for all vertices, the estimated number of clusters ˆK, the cluster indices Y ∈ Rn, and the minimal rank index ind ∈ [0, 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' function GRAPH-ENCODER-ENSEMBLE(E, R, r, m) ind = 1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ▷ initialize the index to pick best cluster size for k ∈ R do ind2 = 1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ▷ initialize the index to pick best random replicate for i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' , r do ˆYk = rand(k, n);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ▷ randomly initialize a label vector of length n in [k] for j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' , m do ˆZk = one-hot-emb(E, ˆYk);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ˆZk = normalize(ˆZk);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ˆY ′ k = k-means(ˆZk, k);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' if ARI( ˆYk, ˆY ′ k)==1 then break;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' else ˆYk = ˆY ′ k;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' end if end for ˆZk = one-hot-emb(E, ˆYk);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ˆZk = normalize(ˆZk);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ind3 = MRI([ˆZk, ˆYk]);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' if ind3 < ind2 then ˆZ = ˆZk;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ˆY = ˆYk;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ind2 = ind3;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' end if end for if ind2 ≤ ind then Z = ˆZ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Y = ˆY;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ˆK = k;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ind = ind2;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' end if end for end function 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='2 Why Normalization The normalization step normalize() scales each vertex em- bedding to unit-norm in Algorithm 1, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=', for each i, do Z(i, :) = Z(i, :)/∥Z(i, :)∥2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Figure 2 illustrates the difference between un-normalized and normalized embedding for a sparse and two- community random graph model.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The normalized embed- ding is on a unit-circle, extracts the connectivity information and omits the vertex degree, thus better for clustering when the community information is solely determined by vertex connectivity (which translates to the block probability in the stochastic block model).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Alternatively, using the normalized embedding + k-means clustering is equivalent to using un- normalized embedding + angle / cosine / spherical k- means clustering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The distinction here closely resembles the two-truth phe- nomenon between graph adjacency and graph Laplacian [16], i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=', the Laplacian spectral embedding (LSE) can be viewed as a degree-normalized version of adjacency spectral embedding (ASE), and typically performs better on sparse graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' See Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='2 and Table 1 for more numerical evaluations on the normalization effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='3 The Minimal Rank Index To measure the clustering quality, we tailored a new rank- based measure called the minimal rank index (MRI).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' It is a key metric in Algorithm 1 to compare multiple embeddings from different initializations and different community sizes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Let Yi denote the cluster index of vertex i, d(·, ·) denotes the Euclidean distance, and µk denote the mean of kth cluster, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=', µk = � i=1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=',n,Yi=k Zi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The minimal rank index is computed as MRI = � i=1,.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=',n I{arg min k=1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=',K d(Zi, µk) ̸= Yi}/n ∈ [0, 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' (1) Namely, it measures how often the vertex embedding is not closest to its cluster mean.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' A smaller value suggests better clustering quality, and MRI equals 0 means every vertex is closest to its cluster mean.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' In the context of k- means clustering, MRI is non-zero when k-means does not converge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Comparing to common cluster size measures like Silhou- ette Score, Davies Bouldin index, Variance Ratio Criterion, Gap criterion [17], [18], MRI is rank-based while others are based on actual distances, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=', a ratio of within-cluster distance and between-cluster distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' If MRI is replaced by any of the other criterion in Algorithm 1, the cluster size choice will be biased towards the smallest possible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' This is because of the unique incremental-dimension nature of graph encoder embedding: the embedding dimension of Algorithm 1 equals k, which is the community size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' This makes the within-cluster distance smaller for smaller k, so any cluster size measure based on actual distances is biased towards smallest k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' On the other hand, MRI is rank-based thus not susceptible to this issue, making it robust against varying dimensions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='4 and Figure 3 demonstrate this phenomenon via simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='4 Ensemble Embedding and Cluster Size Determina- tion By using a set of different models, ensemble learning is known to improve the learning performance and reduce the variance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' This is employed in Algorithm 1 as follows: for each k in the cluster range, we compute a set of vertex embedding and community labels based on random label initialization, then choose the best one with the smallest MRI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' If there are multiple models with the smallest MRI, we take the average embedding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Next, among all possible cluster choice k, we also choose the best embedding with smallest MRI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' In case of multiple IEEE 3 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Illustrate the graph encoder ensemble algorithm by a simple example.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Sparse Graph 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='2 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='15 ARI = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='22 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='5 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='8 1 ARI = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='97 Fig.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The Normalization Effect: the left panel shows the adjacency heatmap of a simulated sparse graph using simulaton 1 in Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' the center panel is the resulting embedding without the normalization step;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' and the right panel is the resulting embedding with normalization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The blue and red dots represent the true community of each vertex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The normalization clearly helps k-means clustering, which yields significantly better ARI vs ground-truth community labels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' embedding with the smallest MRI, we use the embed- ding with the largest k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' For example, suppose the MRI is 0, 0, 0, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='1, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='2 for K = 2, 3, 4, 5, 6, then graph encoder ensemble shall choose ˆK = 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' In the context of Algorithm 1, the ensemble embedding successfully mitigates potential bad initialization and sig- nificantly reduces the estimation variance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='3 and Table 2 demonstrate its numerical advantage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='5 Computational Complexity Analysis Algorithm 1 consists of one-hot graph encoder embedding, k-means clustering, MRI computation, and ensembles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' De- note n as the number of vertices and s as the number of edges.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' At any fixed k, the one-hot graph encoder embedding encoder embedding takes O(nk + s), k-means takes O(nk), the MRI computation takes O(nk).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Therefore, Algorithm 1 runs in O(rm(n max(R)+s)), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=', linear with respect to the number of vertices and edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Similarly, the storage require- ment is just O(n max(R) + s).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' When testing on simulated graphs using default parameters and max(R) = 10, the graph encoder ensemble takes < 3 minutes to process 1 million edges and < 20 minutes for 10 million edges, which is extremely fast and scalable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' For even larger graph, the loops in Algorithm 1 can be easily parallelized for more time reduction: The embedding and MRI can be computed in parallel at each replicate and each cluster size, before computing the optimal MRI and outputting the final results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' This reduces the running time to O(n max(R) + s), same as the storage requirement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 3 EXPERIMENTS In this section we carry out comprehensive numerical exper- iments to showcase the advantage of the graph encoder en- semble, as well as the individual benefits of normalization, ensemble, and MRI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The benchmarks are the same algorithm without normalization;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' without ensemble;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' MRI replaced;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' as well as the adjacency / Laplacian spectral embedding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' We use ARI to measure the matchedness between the ground- truth labels and the estimated communities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='1 Simulation Set-up The stochastic block model (SBM) is arguably the most fundamental community-based random graph model [19], [20].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Each vertex i is associated with a class label Yi ∈ {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' , K}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The class label may be fixed a-priori, or gen- erated by a categorical distribution with prior probability {πk ∈ (0, 1) with �K k=1 πk = 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Then a block probability matrix B = [B(k, l)] ∈ [0, 1]K×K specifies the edge proba- bility between a vertex from class k and a vertex from class l: for any i < j, A(i, j) i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ∼ Bernoulli(B(Yi, Yj)), A(i, i) = 0, A(j, i) = A(i, j).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' The degree-corrected stochastic block model (DC-SBM) [3] is a generalization of SBM to better model the sparsity of real graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Everything else being the same as SBM, each vertex i has an additional degree parameter θi, and the adjacency matrix is generated by A(i, j) ∼ Bernoulli(θiθjB(Yi, Yj)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' For our simulations, we consider the following four DC- SBM models with increasing community size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' In all four models, we use the same degree distribution θi i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' ∼ Beta(1, 4).' 
[Figure: schematic of the graph encoder ensemble. The input graph is embedded over random replicates at different cluster sizes, and the optimal ensemble vertex embedding, community size, and cluster index are selected via the MRI.]

Simulation 1: n = 3000, K = 2, Yi ∈ {1, 2} equally likely, and the block probability matrix is
B = [0.5, 0.1; 0.1, 0.5].

Simulation 2: n = 5000, K = 3, Yi ∈ {1, 2, 3} with prior probability [0.2, 0.3, 0.5], and the block probability matrix is
B = [0.9, 0.1, 0.1; 0.1, 0.5, 0.1; 0.1, 0.1, 0.2].

Simulation 3: n = 3000, K = 4, Yi ∈ {1, 2, 3, 4} with prior probability [0.2, 0.2, 0.3, 0.3], and the block probability matrix is
B = [0.9, 0.1, 0.1, 0.1; 0.1, 0.7, 0.1, 0.1; 0.1, 0.1, 0.5, 0.1; 0.1, 0.1, 0.1, 0.3].

Simulation 4: n = 3000, K = 5, Yi with equally likely prior probability, and the block probability matrix satisfies B(i, i) = 0.2 and B(i, j) = 0.1 for all i = 1, . . . , 5 and j ≠ i.
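As an illustration of this set-up, the sketch below generates one graph from Simulation 1 under the DC-SBM just described. The sampler and its variable names are ours, not code from the paper, but it follows the stated model: labels drawn from the prior, θi ∼ Beta(1, 4), and edges drawn as Bernoulli(θiθjB(Yi, Yj)).

```python
import numpy as np

def sample_dcsbm(n, B, priors, seed=None):
    # Sample labels, degree parameters, and a symmetric adjacency matrix
    # from the degree-corrected stochastic block model described above.
    rng = np.random.default_rng(seed)
    K = len(priors)
    Y = rng.choice(K, size=n, p=priors)                 # class labels Y_i
    theta = rng.beta(1, 4, size=n)                      # degree parameters theta_i ~ Beta(1, 4)
    P = theta[:, None] * theta[None, :] * B[Y][:, Y]    # theta_i * theta_j * B(Y_i, Y_j)
    upper = np.triu(rng.random((n, n)) < P, k=1)        # Bernoulli draws on the upper triangle
    A = (upper | upper.T).astype(float)                 # symmetric adjacency with zero diagonal
    return A, Y

# Simulation 1: n = 3000, K = 2, equally likely labels.
B1 = np.array([[0.5, 0.1],
               [0.1, 0.5]])
A, Y = sample_dcsbm(3000, B1, priors=[0.5, 0.5], seed=0)
```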
3.2 Normalization Comparison

Table 1 clearly shows that the clustering ARI of the normalized algorithm far exceeds that of the un-normalized algorithm. To exclude other factors from this comparison, we simply use r = 1 and assume the cluster size is known. As expected, the same phenomenon also occurs between ASE and LSE, since LSE is a normalized version of ASE.

TABLE 1. Evaluate the normalization effect in the graph encoder ensemble.
ARI          | GEE  | GEE (no norm) | ASE  | LSE
Simulation 1 | 0.91 | 0.10          | 0.23 | 0.91
Simulation 2 | 0.71 | 0.17          | 0.27 | 0.75
Simulation 3 | 0.73 | 0.08          | 0.12 | 0.65
Simulation 4 | 0.78 | 0.06          | 0.17 | 0.78

3.3 Ensemble Comparison

In this simulation we continue to assume a known cluster size, carry out 100 Monte-Carlo replicates, and report the ARI of the ensemble embedding (r = 10) and of the no-ensemble embedding (r = 1). Table 2 shows that the ensemble algorithm clearly outperforms the no-ensemble version: the mean ARI is improved and the variance is significantly reduced. Empirically, the default choice of r = 10 worked sufficiently well throughout our experiments, and we do not observe any significant gain for larger r. Moreover, if the graph size is sufficiently large and the community structure is sufficiently separable, using a smaller r or even r = 1 suffices, which is the case for Simulation 1 in Table 2.

TABLE 2. Evaluate the ensemble advantage in the graph encoder ensemble. After 100 Monte-Carlo replicates, the mean and standard deviation of the ARI are reported.
Average ARI ± std | GEE         | GEE (r = 1)
Simulation 1      | 0.91 ± 0.01 | 0.91 ± 0.01
Simulation 2      | 0.81 ± 0.01 | 0.71 ± 0.16
Simulation 3      | 0.79 ± 0.02 | 0.72 ± 0.09
Simulation 4      | 0.89 ± 0.01 | 0.79 ± 0.12
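The numbers above are adjusted Rand indices aggregated over Monte-Carlo replicates. A sketch of such an evaluation loop, using scikit-learn's `adjusted_rand_score`, is given below; `run_method` is a hypothetical placeholder for whichever clustering method is being benchmarked (e.g., the ensemble with r = 10 versus r = 1), and `sampler` can be any graph generator such as the DC-SBM sketch above.

```python
import numpy as np
from sklearn.metrics import adjusted_rand_score

def run_method(A, K, rng):
    # Hypothetical placeholder: return one community label per vertex.
    # Substitute the graph encoder ensemble (or any benchmark) here.
    return rng.integers(0, K, size=A.shape[0])

def monte_carlo_ari(sampler, method, replicates=100, seed=0):
    # Mean and standard deviation of the ARI over independent replicates,
    # in the spirit of the Table 2 comparison.
    rng = np.random.default_rng(seed)
    scores = []
    for _ in range(replicates):
        A, Y = sampler(rng)                              # fresh graph and ground-truth labels
        Y_hat = method(A, K=len(np.unique(Y)), rng=rng)  # estimated communities
        scores.append(adjusted_rand_score(Y, Y_hat))
    return float(np.mean(scores)), float(np.std(scores))
```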
3.4 Cluster Size Estimation

Here we investigate how well the algorithm estimates the community size. Instead of using the ground-truth size, we let R = 2, 3, . . . , 10 and report the results in Figure 3. The left panel shows the accuracy of the community size estimation as the sample size increases: as n increases, the graph encoder ensemble attains better and better estimation accuracy, which eventually reaches 1. As the MRI is key to the size determination, the center panel shows the average MRI for Simulation 4 (true K = 5): the ensemble algorithm is able to accurately estimate the truth, because the largest community size that minimizes the MRI is indeed 5. The right panel shows the ensemble algorithm with the MRI replaced by the Silhouette Score (the larger the better): the Silhouette Score is biased towards the smallest cluster size and forces the algorithm to choose two communities instead. This phenomenon is consistent throughout all our simulations and experiments, as well as for other size measures such as the DB-index or the variance ratio.
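The Silhouette-Score baseline discussed above can be sketched with scikit-learn as follows. This illustrates only the baseline criterion, not the MRI (which is defined earlier in the paper); the per-size embeddings `Z_by_k` are assumed to be supplied by the method under evaluation, e.g. the encoder embedding at each candidate size.

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def choose_size_by_silhouette(Z_by_k, sizes=range(2, 11)):
    # Z_by_k maps a candidate cluster size k to an n-by-k vertex embedding.
    # The baseline picks the size whose k-means clustering has the largest
    # Silhouette Score, which, as noted above, tends to favor small sizes.
    best_k, best_score = None, -np.inf
    for k in sizes:
        Z = Z_by_k[k]
        labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(Z)
        score = silhouette_score(Z, labels)
        if score > best_score:
            best_k, best_score = k, score
    return best_k, best_score
```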
4 REAL DATA

In this section we consider the following real data sets from the network repository [21] (https://networkrepository.com/index.php) and the Stanford network data (https://snap.stanford.edu/): Cora Citations (2708 vertices, 5429 edges, 7 classes), Industry Partnerships (219 vertices, 630 edges, 3 classes), EU Email Network [22] (1005 vertices, 25571 edges, 42 classes), and Political Blogs [23] (1490 vertices, 33433 edges, 2 classes). The benchmarks are LSE assuming the true K, the graph encoder ensemble assuming the true K, the graph encoder ensemble estimating K via the MRI, and the graph encoder ensemble estimating K via the Silhouette Score. The resulting ARI scores are reported in Table 3: the ensemble algorithm using the known K is the best performer; and in the case of an unknown K, even though it does not pick the ground-truth community size, the performance is still relatively good.

However, even when real data comes with a ground truth, it is often debatable whether the ground truth truly reflects the nature of the connectivity. Take the political blog data as an example: the blogs were manually separated into Republican and Democratic blogs, but naturally the swing voters matter the most. Interestingly, when not assuming the true K = 2, the graph encoder ensemble picks three communities from the data. Figure 4 visualizes the difference: the left panel shows the encoder embedding using the ground-truth labels at K = 2, while the right panel shows the ensemble embedding estimating 3 communities. Surprisingly, the encoder ensemble does an excellent job of identifying the neutral / swing blogs, providing valuable insights that are not available from the ground-truth labels.

[Figure 3 plot: estimation accuracy versus sample size ("Cluster Choice via MRI", Sim 1-4); Minimum Rank Index versus cluster size ("Cluster Choice via MRI for Sim 4"); Silhouette Score versus cluster size ("Cluster Choice via SS for Sim 4").]

Fig. 3. Demonstration of the cluster size estimation in the graph encoder ensemble. For each simulation and each graph size, we independently generate 100 graphs and run the ensemble algorithm to estimate the community size. The left panel shows the estimation accuracy as the graph size grows, i.e., how often the algorithm chooses the correct community size. As the graph size increases, the estimation accuracy gradually increases to 1 for all simulations. The center panel shows the average MRI at n = 5000 for Simulation 4, where K̂ = 5 is the estimated size and also the ground-truth size. The right panel shows the average Silhouette Score, for which K̂_SS = 2 would be the choice.

TABLE 3. Real data experiments.
                | True K | GEE  | LSE  | R     | GEE using K̂ | GEE using K̂_SS
Cora Citations  | 7      | 0.11 | 0.08 | 2-20  | 0.07 (3)    | 0.03 (2)
Emails          | 42     | 0.48 | 0.23 | 10-50 | 0.39 (14)   | 0.28 (10)
Industry        | 3      | 0.13 | 0.13 | 2-10  | 0.08 (5)    | 0.07 (2)
Political blogs | 2      | 0.80 | 0.80 | 2-10  | 0.69 (3)    | 0.80 (2)
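As a rough illustration of this real-data protocol, the sketch below loads an edge list, runs an off-the-shelf spectral clustering with the true K (a stand-in loosely related to the LSE benchmark, not the paper's GEE implementation), and scores it against the ground-truth labels with the ARI. The file path and format are hypothetical.

```python
import numpy as np
from sklearn.cluster import SpectralClustering
from sklearn.metrics import adjusted_rand_score

def load_edge_list(path, n):
    # Hypothetical loader: one "u v" pair per line, vertices indexed 0..n-1.
    A = np.zeros((n, n))
    with open(path) as f:
        for line in f:
            u, v = map(int, line.split()[:2])
            A[u, v] = A[v, u] = 1.0
    np.fill_diagonal(A, 0.0)
    return A

def benchmark_ari(A, labels_true, K):
    # Spectral clustering on the adjacency matrix as a stand-in benchmark with known K.
    pred = SpectralClustering(n_clusters=K, affinity="precomputed",
                              assign_labels="kmeans", random_state=0).fit_predict(A)
    return adjusted_rand_score(labels_true, pred)
```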
[Figure 4 plot: two scatter panels of the political-blog vertex embedding, "GEE at K=2" (left) and "GEE at K=3" (right).]

Fig. 4. Visualize the vertex embedding and community structure for the political blogs. Different colors represent different communities. The left panel is the vertex embedding using the ground-truth size K = 2, while the right panel shows the ensemble embedding estimating 3 communities.

5 CONCLUSION

In this paper we proposed the graph encoder ensemble, which simultaneously achieves graph embedding, community detection, and community size determination. It is easy to implement, computationally efficient, and performs well for sparse graphs and unknown community sizes throughout the experiments. There are several interesting topics to pursue in future work, such as a mathematical proof of the asymptotic clustering optimality under the stochastic block model, further investigation into the theoretical and numerical properties of the MRI, algorithm improvements for multi-level community detection, and applications to complicated network data such as dynamic and multi-modal graphs.

REFERENCES

[1] M. Girvan and M. E. J. Newman, "Community structure in social and biological networks," Proceedings of the National Academy of Sciences, vol. 99, no. 12, pp. 7821-7826, 2002.
[2] B. Karrer and M. E. J. Newman, "Stochastic blockmodels and community structure in networks," Physical Review E, vol. 83, p. 016107, 2011.
[3] Y. Zhao, E. Levina, and J. Zhu, "Consistency of community detection in networks under degree-corrected stochastic block models," Annals of Statistics, vol. 40, no. 4, pp. 2266-2292, 2012.
[4] E. Abbe, "Community detection and stochastic block models: Recent developments," Journal of Machine Learning Research, vol. 18, no. 177, pp. 1-86, 2018.
[5] K. Rohe, S. Chatterjee, and B. Yu, "Spectral clustering and the high-dimensional stochastic blockmodel," Annals of Statistics, vol. 39, no. 4, pp. 1878-1915, 2011.
[6] D. Sussman, M. Tang, D. Fishkind, and C. Priebe, "A consistent adjacency spectral embedding for stochastic blockmodel graphs," Journal of the American Statistical Association, vol. 107, no. 499, pp. 1119-1128, 2012.
[7] C. Gao, Z. Ma, A. Y. Zhang, and H. H. Zhou, "Community detection in degree-corrected block models," Annals of Statistics, vol. 46, no. 5, pp. 2153-2185, 2018.
[8] V. D. Blondel, J. L. Guillaume, R. Lambiotte, and E. Lefebvre, "Fast unfolding of communities in large networks," Journal of Statistical Mechanics: Theory and Experiment, vol. 10008, p. 6, 2008.
[9] V. A. Traag, L. Waltman, and N. J. van Eck, "From Louvain to Leiden: guaranteeing well-connected communities," Scientific Reports, vol. 9, p. 5233, 2019.
[10] C. Shen, Q. Wang, and C. E. Priebe, "One-hot graph encoder embedding," IEEE Transactions on Pattern Analysis and Machine Intelligence, accepted, 2023.
[11] R. Maclin and D. Opitz, "Popular ensemble methods: An empirical study," Journal of Artificial Intelligence Research, vol. 11, pp. 169-198, 1999.
[12] L. Breiman, "Random forests," Machine Learning, vol. 45, no. 1, pp. 5-32, October 2001.
[13] S. P. Lloyd, "Least squares quantization in PCM," IEEE Transactions on Information Theory, vol. 28, no. 2, pp. 129-137, 1982.
[14] E. W. Forgy, "Cluster analysis of multivariate data: efficiency versus interpretability of classifications," Biometrics, vol. 21, no. 3, pp. 768-769, 1965.
[15] W. M. Rand, "Objective criteria for the evaluation of clustering methods," Journal of the American Statistical Association, vol. 66, no. 336, pp. 846-850, 1971.
[16] C. Priebe, Y. Park, J. Vogelstein, J. Conroy, V. Lyzinski, M. Tang, A. Athreya, J. Cape, and E. Bridgeford, "On a 'two truths' phenomenon in spectral graph clustering," Proceedings of the National Academy of Sciences, vol. 116, no. 13, pp. 5995-6000, 2019.
[17] P. J. Rousseeuw, "Silhouettes: a graphical aid to the interpretation and validation of cluster analysis," Journal of Computational and Applied Mathematics, vol. 20, pp. 53-65, 1987.
[18] D. L. Davies and D. W. Bouldin, "A cluster separation measure," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 1, no. 2, pp. 224-227, 1979.
[19] P. Holland, K. Laskey, and S. Leinhardt, "Stochastic blockmodels: First steps," Social Networks, vol. 5, no. 2, pp. 109-137, 1983.
[20] T. Snijders and K. Nowicki, "Estimation and prediction for stochastic blockmodels for graphs with latent block structure," Journal of Classification, vol. 14, no. 1, pp. 75-100, 1997.
[21] R. A. Rossi and N. K. Ahmed, "The network data repository with interactive graph analytics and visualization," in AAAI, 2015. [Online]. Available: https://networkrepository.com
[22] H. Yin, A. R. Benson, J. Leskovec, and D. F. Gleich, "Local higher-order graph clustering," in Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2017, pp. 555-564.
[23] L. Adamic and N. Glance, "The political blogosphere and the 2004 US election: Divided they blog," in Proceedings of the 3rd International Workshop on Link Discovery. New York: ACM Press, 2005, pp. 36-43.

Cencheng Shen received the BS degree in Quantitative Finance from the National University of Singapore in 2010, and the PhD degree in Applied Mathematics and Statistics from Johns Hopkins University in 2015. He is an assistant professor in the Department of Applied Economics and Statistics at the University of Delaware. His research interests include graph inference, hypothesis testing, correlation and dependence.

Youngser Park received the B.E. degree in electrical engineering from Inha University in Seoul, Korea in 1985, and the M.S. and Ph.D. degrees in computer science from The George Washington University in 1991 and 2011, respectively. From 1998 to 2000 he worked at the Johns Hopkins Medical Institutes as a senior research engineer. From 2003 until 2011 he worked as a senior research analyst, and has been an associate research scientist since 2011, then research scientist since 2019, in the Center for Imaging Science at Johns Hopkins University. At Johns Hopkins, he holds joint appointments in The Institute for Computational Medicine and the Human Language Technology Center of Excellence. His current research interests are clustering algorithms, pattern classification, and data mining for high-dimensional and graph data.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Carey E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Priebe received the BS degree in mathematics from Purdue University in 1984, the MS degree in computer science from San Diego State University in 1988, and the PhD degree in information technology (computa- tional statistics) from George Mason Univer- sity in 1993.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' From 1985 to 1994 he worked as a mathematician and scientist in the US Navy research and development laboratory system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' Since 1994 he has been a professor in the Department of Applied Mathematics and Statistics at Johns Hopkins University.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' His research interests include computational statistics, kernel and mixture estimates, sta- tistical pattern recognition, model selection, and statistical infer- ence for high-dimensional and graph data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} +page_content=' He is a Senior Member of the IEEE, an Elected Member of the International Statistical Institute, a Fellow of the Institute of Mathematical Statistics, and a Fellow of the American Statistical Association.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/WtFIT4oBgHgl3EQfiCtg/content/2301.11290v1.pdf'} diff --git a/XNAyT4oBgHgl3EQfh_h5/content/2301.00387v1.pdf b/XNAyT4oBgHgl3EQfh_h5/content/2301.00387v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..33def9646df107d13b7a90ec8a5616704147a6b1 --- /dev/null +++ b/XNAyT4oBgHgl3EQfh_h5/content/2301.00387v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:584b51da1364a12b0b500a10cc17b279a3a3fd8b17ee9590e540036a0577e546 +size 255974 diff --git a/XNAyT4oBgHgl3EQfh_h5/vector_store/index.faiss b/XNAyT4oBgHgl3EQfh_h5/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..009254d5e8c5e51725d3f826aa650edac5f59759 --- /dev/null +++ b/XNAyT4oBgHgl3EQfh_h5/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:433459ffa60f4fbff4e2eb2dff0b4422ffa5b58eaa35ba614520482459f73339 +size 2883629 diff --git a/XNAyT4oBgHgl3EQfh_h5/vector_store/index.pkl b/XNAyT4oBgHgl3EQfh_h5/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f731f50c2d497918073c007e3e7217938c279a74 --- /dev/null +++ b/XNAyT4oBgHgl3EQfh_h5/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e61be0c37a93aa0cefd30056831334107fd65519e0387086814f791432ebd13 +size 107806 diff --git a/YNE0T4oBgHgl3EQf3wIQ/content/tmp_files/2301.02728v1.pdf.txt b/YNE0T4oBgHgl3EQf3wIQ/content/tmp_files/2301.02728v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..683da8e9e05dc3bcbd037015809a1f61897c5636 --- /dev/null +++ b/YNE0T4oBgHgl3EQf3wIQ/content/tmp_files/2301.02728v1.pdf.txt @@ -0,0 +1,798 @@ +arXiv:2301.02728v1 [econ.TH] 6 Jan 2023 +A RESPONSIBILITY VALUE FOR 
DIGRAPHS
ROSA VAN DEN ENDE AND DYLAN LAPLACE MERMOUD

Abstract. There is an increasing need to hold players responsible for negative or positive impacts that take place elsewhere in a value chain or a network. For example, countries or companies are held more and more responsible for their indirect carbon emissions. We introduce a responsibility value that allocates the total impact of the value chain among the players, taking into account their direct impact and their indirect impact through the underlying graph. Moreover, we show that the responsibility value satisfies a set of natural yet important properties.

Date: January 10, 2023.
2020 Mathematics Subject Classification. Primary 91B32; Secondary 05C20.
This work has received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No 956107, "Economic Policy in Complex Environments (EPOC)".

1. Introduction

We see an increasing urge to account for environmental or social impact throughout a supply chain or network: through the choices players in such a network make, for example by choosing to buy or not to buy from a certain supplier, they carry some responsibility not only for their own impacts, but also for the impacts that take place upstream of their own economic activities. Examples of impacts for which indirect responsibility could be assigned to players include, but are not limited to, the emission of greenhouse gases, the underpayment of employees and the disposal of hazardous waste, but possibly also positive impacts such as the transition to renewable energy. Assigning this responsibility, and consequently holding these players liable for such impacts, can incentivise the increase of positive impacts or the decrease of negative impacts.

However, as of now, it is not clear how to assign responsibility for indirect impacts to players in a given network. In the case where the considered impacts are emissions of greenhouse gases (GHG), there is consensus that players carry some responsibility for their indirect emissions. This could be either for the energy required to run their processes, known as scope 2 emissions, or for all the other emissions whose sources are not controlled by the player: the scope 3 emissions [3]. However, to what extent these players are responsible for their scope 2 and 3 emissions remains unclear. Moreover, in this context, the same unit of emission could be claimed twice, when the emission falls within one scope for one player and under another scope for another player. The double counting of the same reduction efforts that might subsequently occur is a major policy concern when trying to achieve the emission reduction goals [1].

We thus aim to clarify and quantify the indirect responsibility for impacts in a general manner. Based on the impact of every player and the underlying graph of the players, we introduce a responsibility value that assigns to every player a responsibility for the social or environmental impact of itself and of the nodes it benefits from. We show that this value satisfies certain properties that resemble the axioms that Shapley used in cooperative game theory to define the Shapley value [2]. In Section 3 we offer a method to approximate a solution.
Subsequently, we discuss the interpretation of the discount factor, whose value determines how much emphasis is put on indirect versus direct responsibility.

2. The responsibility value

Let N be a set of finite cardinality n, called the set of players. Let A be an (n × n)-matrix whose entries are real numbers between 0 and 1 and whose rows each sum to 1, i.e., Σ_{j∈N} A_{i,j} = 1. We denote by R the set of such matrices. Let G_A be the weighted, directed graph whose node set is N and whose adjacency matrix is A. If A is symmetric, then G_A is an undirected graph.

In the following, we interpret each edge (i, j), with i the source node and j the terminal node, as an interaction between players i and j, with j benefiting from the actions of player i and consequently taking partial responsibility for this player. The reader may think of a link (i, j) as a trade between a seller i and a buyer j, with A_{i,j} being the proportion of purchases of j among the total volume of sales of i. Note that there is a flow going through the directed graph: a responsibility flow.

We aim to allocate to every player a share of the total responsibility of the grand coalition N in a way that satisfies a few basic properties:
(1) the responsibility of a player for several time periods should be the sum of its responsibilities for each single time period,
(2) two players that are connected in the same way to identical players should be assigned the same amount of indirect responsibility,
(3) a player that does not benefit from any other player, that is, a player that has no incoming edges, should not carry any indirect responsibility,
(4) no responsibility flow between two players in G_A should be ignored, even if this flow consists of a considerable number of edges; we therefore introduce some form of discounting in order to assign more responsibility to a terminal node for source nodes of a short flow than for source nodes of a long flow,
(5) the responsibility for the impact of the entire group should be completely shared among the members of the group; we want to ensure that the responsibility for every unit of impact is assigned to some player while simultaneously preventing double counting.

Then, let ι : N → R be a map that associates to every node of G_A a real value, which can be interpreted as a quantity of impact for which we want to assign responsibility. Let (0, 1) denote the set of real values strictly between 0 and 1. For each γ ∈ (0, 1), we define a map ρ^γ associating to each matrix A ∈ R and impact map ι an n-dimensional vector ρ^γ(A, ι), called the responsibility value of (A, ι). For all i ∈ N, the coordinate ρ^γ_i(A, ι) corresponds to the total impact for which player i should be held responsible, and is defined by

ρ^γ_i(A, ι) = γ · Σ_{k≥0} (1 − γ)^k · ( Σ_{p∈N} A^k_{p,i} ι(p) ).

As A is an adjacency matrix, A^k_{p,i} (the entry of A^k at (p, i)) is the weighted number of walks from node p to node i of length k. For a given length k, we take into account all the nodes of the graph that are connected to player i by summing over all the players. Then, we sum over all possible lengths k, with a discount factor weighting the distance, which decreases the transfer of responsibility. So, for k = 0 one obtains the direct responsibility, while k ≥ 1 yields the indirect responsibility. A direct transcription of this definition, truncated at a finite number of terms (anticipating the approximations of Section 3), is sketched below.
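The sketch below is illustrative and not part of the original paper: it transcribes the definition in Python with NumPy, truncating the infinite sum at an order q chosen by the caller; the function name and its arguments are ours.

import numpy as np

def responsibility_value(A, impact, gamma, q=50):
    # Truncated responsibility value:
    #   rho_i = gamma * sum_{k=0..q} (1 - gamma)^k * sum_p (A^k)_{p,i} * impact[p]
    # A is an (n, n) row-stochastic matrix, impact a length-n vector, gamma in (0, 1).
    A = np.asarray(A, dtype=float)
    impact = np.asarray(impact, dtype=float)
    rho = np.zeros(A.shape[0])
    walk = np.eye(A.shape[0])                        # A^0
    for k in range(q + 1):
        # (impact @ walk)[i] equals sum_p (A^k)_{p,i} * impact[p]
        rho += (1.0 - gamma) ** k * (impact @ walk)
        walk = walk @ A                              # next power of A
    return gamma * rho

Because the rows of A sum to 1, every power A^k is again row stochastic (Lemma 1 below), so each truncated sum is bounded by ι(N) and the loop is numerically well behaved.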
We can interpret the factor γ as a discount factor, the value of which we discuss in more detail in Section 4.

In the following, we show that the responsibility value satisfies the desired properties. Denote by 0 the n-dimensional row vector (0, . . . , 0) and by 1 the n-dimensional row vector (1, . . . , 1). Also, denote by r_i(A) the i-th row of A and by c_j(A) the j-th column of A. We write ι(N) for Σ_{p∈N} ι(p).

Lemma 1. For all A ∈ R, for all k ∈ N and for all i, j ∈ N, we have
A^k_{i,j} ≤ 1,   Σ_{j∈N} A^k_{i,j} = 1,   and   Σ_{p∈N} A^k_{p,i} ι(p) ≤ ι(N).

Proof. We prove the first two statements by induction on k. For k = 0, they hold because A^0 is the identity matrix. Assume that there exists k ∈ N such that, for all i, j ∈ N, we have A^k_{i,j} ≤ 1 and Σ_{j∈N} A^k_{i,j} = 1. Then
A^{k+1}_{i,j} = (A^k · A)_{i,j} = ⟨r_i(A^k), c_j(A)⟩ ≤ ⟨r_i(A^k), 1^⊤⟩ = 1.
Moreover, we have
Σ_{j∈N} A^{k+1}_{i,j} = Σ_{j∈N} Σ_{p∈N} A^k_{i,p} A_{p,j} = Σ_{p∈N} A^k_{i,p} ( Σ_{j∈N} A_{p,j} ) = Σ_{p∈N} A^k_{i,p} = 1.
The last statement directly follows from the first one. □

Proposition 1 (Convergence). Let γ ∈ (0, 1), let A ∈ R and let ι : N → R. For all i ∈ N, we have ρ^γ_i(A, ι) < ∞.

Proof. Let γ ∈ (0, 1). By Lemma 1, for any i ∈ N and any k ∈ N, we have Σ_{p∈N} A^k_{p,i} ι(p) ≤ ι(N). This leads, for any A ∈ R and any map ι : N → R, to
ρ^γ_i(A, ι) ≤ γ Σ_{k≥0} (1 − γ)^k ι(N).
Because ι(N) does not depend on k, we can take it out of the sum, and γ and Σ_{k≥0} (1 − γ)^k cancel each other. Then ρ^γ_i(A, ι) ≤ ι(N) < ∞. □

This result shows that, even though we take into account all responsibility flows as desired and therefore obtain a sum over all possible lengths k, the discounting ensures that ρ^γ always has finite coordinates.

Proposition 2 (Additivity). Let γ ∈ (0, 1) and let A ∈ R. For all i ∈ N and for all pairs of maps ι_1 and ι_2, we have ρ^γ_i(A, ι_1) + ρ^γ_i(A, ι_2) = ρ^γ_i(A, ι_1 + ι_2).

Proof. Let γ ∈ (0, 1). For any player i ∈ N, we have
ρ^γ_i(A, ι_1 + ι_2) = γ Σ_{k≥0} (1 − γ)^k Σ_{p∈N} A^k_{p,i} (ι_1 + ι_2)(p)
= γ Σ_{k≥0} (1 − γ)^k Σ_{p∈N} ( A^k_{p,i} ι_1(p) + A^k_{p,i} ι_2(p) )
= γ Σ_{k≥0} (1 − γ)^k Σ_{p∈N} A^k_{p,i} ι_1(p) + γ Σ_{k≥0} (1 − γ)^k Σ_{p∈N} A^k_{p,i} ι_2(p)
= ρ^γ_i(A, ι_1) + ρ^γ_i(A, ι_2). □

Thus, the responsibility of a player for several time periods is indeed the sum of its responsibilities for each single time period, as desired.

Recall that 0 denotes the n-dimensional row vector (0, . . . , 0).

Definition 1. We say that i ∈ N is an independent player if c_i(A) = 0^⊤.

Lemma 2. For all independent players i ∈ N and for all k ∈ N \ {0}, we have c_i(A^k) = 0^⊤.

Proof. We prove it by induction on k. For k = 1, this is true by definition. Assume that there exists k ∈ N \ {0} such that c_i(A^k) = 0^⊤. Then, for any p ∈ N, we have
A^{k+1}_{p,i} = ⟨r_p(A), c_i(A^k)⟩ = ⟨r_p(A), 0^⊤⟩ = 0,
hence c_i(A^{k+1}) = 0^⊤. □

Proposition 3 (Independent player property). Let γ ∈ (0, 1), let A ∈ R and let ι : N → R. For every independent player i ∈ N, we have ρ^γ_i(A, ι) = γι(i).

Proof. Let γ ∈ (0, 1) and let i ∈ N be an independent player. By Lemma 2, for any p ∈ N and any k ∈ N \ {0}, we have A^k_{p,i} = 0. Then the sum Σ_{p∈N} A^k_{p,i} ι(p) equals 0 for k ∈ N \ {0}, so
ρ^γ_i(A, ι) = γ Σ_{k≥0} (1 − γ)^k Σ_{p∈N} A^k_{p,i} ι(p) = γ Σ_{p∈N} A^0_{p,i} ι(p).
Since A^0 is the identity matrix, we have Σ_{p∈N} A^0_{p,i} ι(p) = A^0_{i,i} ι(i) = ι(i), thus ρ^γ_i(A, ι) = γι(i). □

We remark that the responsibility value holds the players responsible for a fraction γ of their direct impact. Then, for player i ∈ N, we call the quantity γι(i) the direct responsibility. Subsequently, to determine the total responsibility, we add the responsibility for the indirect impacts. Independent players, i.e. players that do not benefit from any other player, do not carry any indirect responsibility and are thus just assigned a fraction γ of their direct impacts.

Definition 2. We say that two players i, j ∈ N are symmetric w.r.t. G_A if r_i(A) = r_j(A) and c_i(A) = c_j(A).

Lemma 3. For two symmetric players i, j ∈ N, we have, for any k ∈ N \ {0},
r_i(A^k) = r_j(A^k)   and   c_i(A^k) = c_j(A^k).

Proof. We prove it by induction on k. For k = 1, this holds by the definition of symmetric players. Assume that there exists k ∈ N \ {0} such that r_i(A^k) = r_j(A^k) and c_i(A^k) = c_j(A^k). Then, for any p ∈ N,
A^{k+1}_{i,p} = ⟨r_i(A^k), c_p(A)⟩ = ⟨r_j(A^k), c_p(A)⟩ = A^{k+1}_{j,p},
hence r_i(A^{k+1}) = r_j(A^{k+1}). With similar calculations on the columns, we have
A^{k+1}_{p,i} = ⟨r_p(A), c_i(A^k)⟩ = ⟨r_p(A), c_j(A^k)⟩ = A^{k+1}_{p,j},
hence c_i(A^{k+1}) = c_j(A^{k+1}). □

Proposition 4 (Symmetry). Let γ ∈ (0, 1), let A ∈ R and let ι : N → R. For all pairs of symmetric players i and j, we have
ρ^γ_i(A, ι) − γι(i) = ρ^γ_j(A, ι) − γι(j).

Proof. Let γ ∈ (0, 1) and let i, j ∈ N be two symmetric players. Then,
ρ^γ_i(A, ι) − γι(i) = γ Σ_{k≥1} (1 − γ)^k Σ_{p∈N} A^k_{p,i} ι(p).
Players i and j are symmetric, so by Lemma 3 we have A^k_{p,i} = A^k_{p,j} for all k ≥ 1, therefore
ρ^γ_i(A, ι) − γι(i) = γ Σ_{k≥1} (1 − γ)^k Σ_{p∈N} A^k_{p,j} ι(p) = ρ^γ_j(A, ι) − γι(j). □

Since for player i ∈ N the assigned direct responsibility is given by γι(i), as we saw in Prop. 3, it follows that ρ^γ_i(A, ι) − γι(i) can be interpreted as the indirect responsibility. Symmetry then expresses the notion of fairness that two players that are connected in the same way to identical players are assigned the same amount of indirect responsibility.

Proposition 5 (Efficiency). Let γ ∈ (0, 1), let A ∈ R and let ι : N → R. We have
Σ_{i∈N} ρ^γ_i(A, ι) = ι(N).

Proof. Let γ ∈ (0, 1), let A ∈ R and let ι : N → R. We have
Σ_{i∈N} ρ^γ_i(A, ι) = Σ_{i∈N} γ Σ_{k≥0} (1 − γ)^k Σ_{p∈N} A^k_{p,i} ι(p) = γ Σ_{k≥0} (1 − γ)^k Σ_{p∈N} ( Σ_{i∈N} A^k_{p,i} ) ι(p).
By Lemma 1, we have Σ_{i∈N} A^k_{p,i} = 1, and then
Σ_{i∈N} ρ^γ_i(A, ι) = γ Σ_{k≥0} (1 − γ)^k ι(N) = ι(N). □

Efficiency ensures that all the units of emissions are assigned to some player and prevents double counting.

To summarize, besides always taking on finite coordinates, our responsibility value ρ^γ always satisfies additivity, symmetry, the independent player property and efficiency. Note that these properties resemble the axioms that Shapley used to define his Shapley value [2] in cooperative game theory.
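As an aside that is not part of the original text, these properties are easy to verify numerically: the sketch below builds a random matrix in R with one independent player and checks efficiency and the independent player property, assuming the responsibility_value helper sketched in Section 2.

import numpy as np

rng = np.random.default_rng(0)
n, gamma = 5, 0.5
A = rng.random((n, n))
A[:, 0] = 0.0                              # player 0 has no incoming edges, hence is independent
A = A / A.sum(axis=1, keepdims=True)       # normalise the rows so that A belongs to R
impact = rng.random(n)

rho = responsibility_value(A, impact, gamma, q=200)
print(np.isclose(rho.sum(), impact.sum()))     # efficiency: the shares add up to iota(N)
print(np.isclose(rho[0], gamma * impact[0]))   # independent player property for player 0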
3. Approximate solutions and numerical computation

3.1. ε-approximations. With the responsibility value, we aim to provide a practical tool. In the sequel, we show that, despite being defined by an infinite sum, the responsibility value can be approximated precisely with just a few computations. Let γ ∈ (0, 1), let A ∈ R, let ι : N → R and let i ∈ N. Denote by ρ^γ(A, ι)|_q the truncated responsibility value, defined, for all i ∈ N, by
ρ^γ_i(A, ι)|_q = γ Σ_{k=0}^{q} (1 − γ)^k ( Σ_{p∈N} A^k_{p,i} ι(p) ).
The value ρ^γ(A, ι)|_q requires only a limited number of calculations to be computed, mainly q − 1 matrix multiplications. Given ε > 0, we define an ε-approximation as any r ∈ R^n such that ∥ρ^γ(A, ι) − r∥_∞ ≤ ε, with ∥·∥_∞ the L^∞-norm on R^n. We denote by q_ε the smallest integer such that ρ^γ(A, ι)|_{q_ε} is an ε-approximation, i.e.,
q_ε := min { q ∈ N | ∀i ∈ N, |ρ^γ_i(A, ι) − ρ^γ_i(A, ι)|_q| ≤ ε }.
By Lemma 1, for all i ∈ N we have Σ_{p∈N} A^k_{p,i} ι(p) ≤ ι(N), and then
∥ρ^γ(A, ι) − ρ^γ(A, ι)|_q∥_∞ ≤ γ ι(N) Σ_{k≥q+1} (1 − γ)^k.
Using the formulae for geometric and partial geometric series, we have
Σ_{k>q} (1 − γ)^k = Σ_{k≥0} (1 − γ)^k − Σ_{k=0}^{q} (1 − γ)^k = 1/γ − (1 − (1 − γ)^{q+1})/γ = (1 − γ)^{q+1}/γ,
and then ∥ρ^γ(A, ι) − ρ^γ(A, ι)|_q∥_∞ ≤ ι(N)(1 − γ)^{q+1}. Therefore, q_ε is determined by
(1 − γ)^{q_ε} ≥ ι(N)ε ≥ (1 − γ)^{q_ε+1}.
Using a computer, it is fairly easy to find q_ε, and then to compute an ε-approximation r := ρ^γ(A, ι)|_{q_ε}. By minimality of q_ε, r is the ε-approximation most efficiently computed, in q_ε + 1 steps requiring q_ε − 1 matrix multiplications.

3.2. Numerical computation. For convenience, we also write ι for the n-dimensional row vector whose i-th coordinate is ι(i). The formula for the responsibility value becomes ρ^γ_i(A, ι) = γ Σ_{k≥0} (1 − γ)^k ⟨c_i(A^k), ι⟩. Denote by P^q_γ(A) the matrix derived from A defined by
P^q_γ(A) := Σ_{k=0}^{q} ((1 − γ)A)^k.
This new matrix only requires q − 1 matrix multiplications to be computed. Rearranging some terms, we find a highly efficient formula to calculate approximations of the responsibility value for all the players simultaneously:
ρ^γ(A, ι)|_q = γ ι · P^q_γ(A).

A = [ 0.1  0  0.1  0.8
      0.2  0  0.2  0.6
      0.1  0  0.1  0.8
      0.5  0  0.5  0   ]

Figure 1. Adjacency matrix A and its corresponding digraph G_A (the digraph drawing is omitted here; its edge weights are the entries of A).

3.3. Example. Let γ = 0.6, let N = {1, 2, 3, 4}, let ι(i) = i for all i ∈ N, and let A be the adjacency matrix defined in Figure 1.
Notice that, in this case, players 1 and 3 are symmetric, and player 2 is an independent player. By Prop. 3, we have that
ρ^0.6_2(A, ι) = 0.6 · ι(2) = 1.2.
It means that player 2, as part of the whole group N, is responsible for an impact of 1.2 out of the global impact of ι(N) = 10.
Let ε := 10^−3. To obtain an ε-approximation of ρ^0.6(A, ι), we have to find the first integer q_ε such that (0.4)^{q_ε+1} is smaller than or equal to ι(N) · ε = 0.01, i.e., (0.4)^{q_ε} ≥ 0.01 ≥ (0.4)^{q_ε+1}. Since
(0.4)^1 = 0.4,  (0.4)^2 = 0.16,  (0.4)^3 = 0.064,  (0.4)^4 = 0.0256,  (0.4)^5 = 0.01024,  (0.4)^6 = 0.004096,
we find q_ε = 5. Then, we need to compute the matrix P^5_{0.6}(A), which is approximately
P^5_{0.6}(A) ≃ [ 1.129  0  0.129  0.401
                 0.160  1  0.160  0.341
                 0.129  0  1.129  0.401
                 0.251  0  0.251  1.158 ].
We can now finish the calculations:
ρ^0.6(A, ι) ≃ (0.6  1.2  1.8  2.4) · P^5_{0.6}(A) = (1.704  1.2  2.904  4.151).
We indeed obtain the right value for player 2. We see that players 2 and 3 carry less responsibility than their direct impact, while players 1 and 4 are held responsible for more than their direct impacts. The short script below reproduces these numbers.
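The following lines are illustrative and not part of the paper: they compute P^5_{0.6}(A) and the responsibility value of the example directly with NumPy and should match the matrix and the vector printed above up to rounding.

import numpy as np

A = np.array([[0.1, 0.0, 0.1, 0.8],
              [0.2, 0.0, 0.2, 0.6],
              [0.1, 0.0, 0.1, 0.8],
              [0.5, 0.0, 0.5, 0.0]])
iota = np.array([1.0, 2.0, 3.0, 4.0])
gamma, q = 0.6, 5

# P^q_gamma(A) = sum_{k=0..q} ((1 - gamma) * A)^k
P = sum(np.linalg.matrix_power((1.0 - gamma) * A, k) for k in range(q + 1))
rho = gamma * iota @ P

print(np.round(P, 3))     # approximately the matrix P^5_0.6(A) shown above
print(np.round(rho, 3))   # approximately [1.704, 1.2, 2.904, 4.151]
print(rho.sum())          # about 9.96; tends to iota.sum() = 10 as q grows (efficiency)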
As a further check,
ρ^0.6_1(A, ι) − γι(1) ≃ 1.704 − 0.6 = 1.104   and   ρ^0.6_3(A, ι) − γι(3) ≃ 2.904 − 1.8 = 1.104,
in line with the symmetry property, since players 1 and 3 are symmetric.

4. The parameter γ

The parameter γ ∈ (0, 1) plays an important role in the responsibility measure and determines how much emphasis is put on direct responsibility with respect to indirect responsibility. Recall that for a player i ∈ N with direct impact ι(i), the direct responsibility is given by γι(i), to which we subsequently add the indirect responsibility. To get some intuition for the parameter γ, consider the example in Figure 2, which consists of N = {1, 2}, A′_{1,2} = A′_{2,2} = 1 and A′_{1,1} = A′_{2,1} = 0. Moreover, the impacts are given by ι(1) = 1 and ι(2) = 0. In words, player 1 sells all of her output to player 2, who in turn consumes all of her own output herself. Since ι(2) = 0, the activities of player 2 do not result in any impact. Then, let us evaluate the assigned responsibilities. For player 1, we obtain
ρ^γ_1(A′, ι) = γι(1) = γ,
while for player 2, we find
ρ^γ_2(A′, ι) = (1 − γ)ι(1) + ι(2) = 1 − γ.

A′ = [ 0  1
       0  1 ]

Figure 2. Adjacency matrix A′ and its corresponding digraph G_{A′} (drawing omitted: node 1 has a single edge of weight 1 to node 2, which carries a self-loop of weight 1).

Observe that when γ < 1/2, player 2 carries more responsibility for the impact of player 1 than player 1 itself. In fact, even though its own impact is zero, player 2 has a greater total responsibility than player 1, who does carry direct responsibility. On the one hand, if one believes that a player should be responsible for its own actions, it could therefore be argued that a value of γ < 1/2 is unreasonable. On the other hand, one could argue that γ < 1/2 is legitimate since it reflects the power or leverage a player has. In the current example, player 2 is the only player to whom player 1 supplies, making player 2 the reason why player 1 has the impacts. If player 2 were to cease buying from player 1, player 1 would, in the given situation, no longer have any incentive to keep producing the impacts. Thus, the power that player 2 has is reflected in this high responsibility value. These are moral or political questions to which it is not easy to find a universal answer. In our model, one can adapt the value of γ in order to be in line with the point of view on these dilemmas one chooses to take. The small sweep below illustrates this threshold numerically.
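The snippet below is illustrative and not from the paper; it evaluates both players' responsibilities in the Figure 2 example for a few values of γ, reusing the responsibility_value helper sketched in Section 2, and makes the threshold at γ = 1/2 visible.

import numpy as np

A2 = np.array([[0.0, 1.0],
               [0.0, 1.0]])
impact2 = np.array([1.0, 0.0])

for gamma in (0.3, 0.5, 0.7):
    rho = responsibility_value(A2, impact2, gamma, q=200)
    print(gamma, np.round(rho, 3))   # closed forms from the text: (gamma, 1 - gamma)
# gamma = 0.3: player 2 bears more responsibility than player 1 (0.7 vs 0.3);
# gamma = 0.5: the two coincide; gamma = 0.7: the ranking is reversed.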
5. Conclusions

As a response to the increasing need to assign responsibility to a player for impacts that take place elsewhere in the value chain, whether such impacts are negative impacts like the emission of greenhouse gases and the underpayment of employees or positive impacts such as the transition to renewable energies, we developed a method to assign direct as well as indirect responsibility to players, based on the underlying graph and the direct impacts of every player. Since this responsibility value is shown to satisfy a set of desirable properties, it can be applied in many situations and on many different scales, as players can be interpreted as countries, firms or individuals. For example, if we consider the impacts to be carbon emissions, the responsibility value could play a major role in achieving the emission reduction efforts.

References
[1] Schneider, L., Kollmuss, A., & Lazarus, M. (2015). Addressing the risk of double counting emission reductions under the UNFCCC. Climatic Change, 131(4), 473–486.
[2] Shapley, L. S. (2016). A value for n-person games. In Contributions to the Theory of Games (AM-28), Volume II (pp. 307–318). Princeton University Press.
[3] World Resources Institute and World Business Council for Sustainable Development (2004). The Greenhouse Gas Protocol. World Resources Institute, Washington, D.C.

Centre d'Économie de la Sorbonne, Université Paris I Panthéon-Sorbonne, 106-112 boulevard de l'Hôpital, 75013, Paris, France & University of Bielefeld, Universitätsstrasse 25, 33615 Bielefeld, Germany
Email address: rosa.ende@gmail.com

Centre d'Économie de la Sorbonne, Université Paris I Panthéon-Sorbonne, 106-112 boulevard de l'Hôpital, 75013, Paris, France
Email address: dylan.laplace.mermoud@gmail.com

diff --git a/YNE0T4oBgHgl3EQf3wIQ/content/tmp_files/load_file.txt b/YNE0T4oBgHgl3EQf3wIQ/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a2740bd9985dafda15ebebaf2b71e4437ca53d8 --- /dev/null +++ b/YNE0T4oBgHgl3EQf3wIQ/content/tmp_files/load_file.txt @@ -0,0 +1,330 @@
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Examples of impacts for which indirect responsibility could be assigned to players include but are not limited to the emission of greenhouse gases, the underpayment of employees, the disposal of hazardous waste, but possibly also positive impacts such as the tran- sition to renewable energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Assigning this responsibility and consequently holding these players liable for such impacts, can incentivise the increase of positive impacts or the decrease of negative impacts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' However, as of now, it is not clear how to assign responsibility for indirect im- pacts to players in a given network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' In the case where the considered impacts are the emissions of greenhouse gases (GHG), there is consensus that players carry some responsibility for their indirect emissions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' This could be either the energy required to run their processes, known as scope 2 emissions, or for all the other emissions the sources of which were not controlled by the player: the scope 3 emissions [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' However, to which extent these players are responsible for their scope 2 and 3 emis- sions, remains unclear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Moreover, in this context, the same unit of emission could be claimed twice;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' when the emission falls within one scope for one player and under another scope for another player.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' The double counting of the same reduction efforts Date: January 10, 2023.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' 2020 Mathematics Subject Classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' MSC Primary 91B32;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Secondary 05C20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' This work has received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Sk�lodowska-Curie grant agreement No 956107, “Economic Policy in Complex Environments (EPOC)”.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' 1 2 ROSA VAN DEN ENDE AND DYLAN LAPLACE MERMOUD that might subsequently occur is a major policy concern when trying to achieve the emission reduction goals [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We thus aim to clarify and quantify the indirect responsibility for impacts in a general manner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Based on the impact of every player and the underlying graph of the players, we introduce a responsibility value that assigns to every player a responsibility for the social or environmental impact of itself and the nodes it benefits from.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We show that this value satisfies certain properties, that resemble the axioms that Shapley used in cooperative game theory to define the Shapley value [?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' In section 3 we offer a method to approximate a solution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Subsequently, we discuss the interpretation of the value of the discount factor that plays an important role on the emphasis that is put on the indirect versus direct responsibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' The responsibility value Let N be a set of finite cardinality n, called the set of players.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let A be a (n×n)- matrix, with all its entries being real numbers between 0 and 1 and with the sum of entries of each row being equal to 1, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=', � j∈N Ai,j = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We denote by R the set of such matrices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let GA be the weighted, directed graph the node set of which is N and whose adjacency matrix is A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' If A is symmetric, then GA is an undirected graph.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' In the following, we interpret each edge (i, j), with i the source node and j the terminal node, as an interaction between players i and j, with j benefiting from actions of player i, and consequentially taking partial responsibility for this player.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' The reader may think of a link (i, j) as a trade between a seller i and a buyer j, and Ai,j being the proportion of purchases of j among the total volume of sales of i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Note that there is a flow going through the directed graph: a responsibility flow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We aim to allocate to every player a share of the total responsibility of the grand coalition N in a way that a few basic properties are satisfied: (1) the responsibility of a player for several time periods should be the sum of its responsibilities of each single time period,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' (2) two players that are connected in the same way to identical players should be assigned the same amount of indirect responsibility,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' (3) a player that does not benefit from any other player,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' that is,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' a player that has no incoming edges,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' should not carry any indirect responsibility,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' (4) no responsibility flow between two players in GA should be ignored,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' even if this flow consists of a considerable amount of edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, we want to introduce some form of discounting in order to assign more responsibility to a terminal node for source nodes of a short flow than for source nodes of a long flow, A RESPONSIBILITY VALUE FOR DIGRAPHS 3 (5) the responsibility for the impact of the entire group should be completely shared among the members of the group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We want to ensure that the re- sponsibility for every unit of impact is assigned to some player while simul- taneously preventing double counting.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, let ι : N → R be a map that associates to every node of GA a real value, which can be interpreted as a quantity of impact for which we want to assign responsibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let (0, 1) denote the set of real values strictly included between 0 and 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For each γ ∈ (0, 1), we define a map ργ : R → Rn associating to each matrix A ∈ R an n-dimensional vector ργ, called the responsibility value of (A, ι).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For all i ∈ N, the coordinate ργ i (A, ι) corresponds to the total impact for which player i should be held responsible, and is defined by ργ i (A, ι) = γ � k≥0 (1 − γ)k �� p∈N Ak p,i ι(p) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' As A is an adjacency matrix, Ak p,i (the entry of Ak at (p, i)) counts the number of walks from node p to node i of length k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For a given length k, we take into account all the nodes of the graph that are connected to player i by summing over all the players.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, we sum over all the possible lengths k, with a discount factor weighting the distance, that decreases the transfer of responsibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' So, for k = 0, one obtains the direct responsibility, while k ≥ 1 yields the indirect responsibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We can interpret the factor γ as a discount factor, the value of which we discuss in more detail in section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' In the following, we will show that the responsibility value satisfies the desired properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Denote by 0 the n-dimensional row vector (0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' , 0) and denote by 1 the n-dimensional row vector (1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' , 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Also, denote by ri(A) the i-th row of A and by cj(A) the j-th column of A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We write ι(N) for � p∈N ι(p).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Lemma 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For all A ∈ R, for all k ∈ N and for all i, j ∈ N, we have Ak i,j ≤ 1, � j∈N Ak i,j = 1, and � p∈N Ak p,iι(p) ≤ ι(N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We prove the two first statements by induction on k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For k = 0, this is true because A0 is the identity matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, assume that there exists k ∈ N such that, for all i, j ∈ N, we have Ak i,j ≤ 1 and � j∈N Ak i,j = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, Ak+1 i,j = � Ak · A � i,j = ⟨ri � Ak� , cj (A)⟩ ≤ ⟨ri � Ak� , 1⊤⟩ = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Moreover, we have � j∈N Ak+1 i,j = � j∈N �� p∈N Ak i,p Ap,j � = � p∈N � Ak i,p �� j∈N Ap,j �� = � p∈N Ak i,p = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' The last statement directly follows from the first one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ 4 ROSA VAN DEN ENDE AND DYLAN LAPLACE MERMOUD Proposition 1 (Convergence).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1), let A ∈ R and let ι : N → R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For all i ∈ N, we have ργ i (A, ι) < ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' By Lemma 1, for any i, p ∈ N, we have � p∈N Ak p,iι(p) ≤ ι(N).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' It leads, for any A ∈ R and any map ι : N → R, to ργ i (A, ι) ≤ γ � k≥0 (1 − γ)kι(N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Because ι(N) does not depend on k, we can put it out of the sum, and let γ and � k≥0(1 − γ)k cancel each other.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, ργ i (A, ι) ≤ ι(N) < ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ This result shows that even though we take into account all responsibility flows as desired and therefore obtain a sum over all possible lengths k that is finite, the discounting ensures that ργ always has finite coordinates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proposition 2 (Additivity).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1) and let A ∈ R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For all i ∈ N and for all pairs of maps ι1 and ι2, we have ργ i (A, ι1) + ργ i (1, ι2) = ργ i (A, ι1 + ι2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For any player i ∈ N, we have ργ i (A, ι1 + ι2) = γ � k≥0 (1 − γ)k �� p∈N Ak p,i (ι1 + ι2) (p) � = γ � k≥0 (1 − γ)k �� p∈N � Ak p,i ι1(p) + Ak p,i ι2(p) � � = γ � k≥0 (1 − γ)k � p∈N Ak p,i ι1(p) � �� � ργ i (A,ι1) + γ � k≥0 (1 − γ)k � p∈N Ak p,i ι2(p) � �� � ργ i (A,ι2) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ Thus, the responsibility of a player for several time periods is indeed the sum of its responsibilities of each single time period, as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Recall that 0 denotes the n-dimensional row vector (0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' , 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Definition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We say that i ∈ N is an independent player if we have ci (A) = 0⊤.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Lemma 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For all independent players i ∈ N, and for all k ∈ N \\ {0}, we have, ci � Ak� = 0⊤.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' A RESPONSIBILITY VALUE FOR DIGRAPHS 5 Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We prove it by induction on k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For k = 1, this is true by definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, assume that there exists k ∈ N \\ {0} such that ri � Ak� = 0 and ci � Ak� = 0⊤.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, for any p ∈ N, we have, Ak+1 p,i = ⟨rp (A) , ci � Ak� ⟩ = ⟨rp (A) , 0⊤⟩ = 0, then ci � Ak+1� = 0⊤.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ Proposition 3 (Independent player property).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1), let A ∈ R and let ι : N → R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We have ργ i (A, ι) = γι(i).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1) and let i ∈ N be an independent player.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' By Lemma 2, for any p ∈ N and any k ∈ N \\ {0}, we have Ak p,i = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then the sum � p∈N Ak p,i ι(p) equals 0 for k ∈ N \\ {0}, so ργ i (A, ι) = γ � k=0 (1 − γ)k �� p∈N Ak p,i ι(p) � = γ �� p∈N A0 p,i ι(p) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' A0 being the identity matrix, we have � p∈N A0 p,i ι(p) = A0 i,i ι(i), thus ργ i (A, ι) = γι(i).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ We remark that the responsibility value holds the players responsible for a fraction γ of their direct impact.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, for player i ∈ N, we call the quantity γι(i) the direct responsibility.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Subsequently, to determine the total responsibility, we add the responsibility of the indirect impacts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Independent players, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' players that do not benefit from any other player, do not carry any indirect responsibility and are thus just assigned a fraction γ of their direct impacts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We say that two players i, j ∈ N are symmetric w.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content='r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' GA if ri (A) = rj (A) and ci (A) = cj (A) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For two symmetric players i, j ∈ N, we have, for any k ∈ N, ri � Ak� = rj � Ak� and ci � Ak� = cj � Ak� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We prove it by induction on k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For k = 0, this is true because A0 is the identity matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, assume that there exists k ∈ N such that ri � Ak� = rj � Ak� and ci � Ak� = cj � Ak� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, for any p ∈ N, Ak+1 i,p = ⟨ri � Ak� , cp (A)⟩ = ⟨rj � Ak� , cp (A)⟩ = Ak+1 j,p , then ri � Ak+1� = rj � Ak+1� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' With similar calculations on the columns, we have Ak+1 p,i = ⟨rp (A) , ci � Ak� ⟩ = ⟨rp (A) , cj � Ak� ⟩ = Ak+1 p,j , then ci � Ak+1� = cj � Ak+1� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ 6 ROSA VAN DEN ENDE AND DYLAN LAPLACE MERMOUD Proposition 4 (Symmetry).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1), let A ∈ R and let ι : N → R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For all pairs of symmetric players i and j, we have ργ i (A, ι) − γι(i) = ργ j (A, ι) − γι(j).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1) and let i, j ∈ N be two symmetric players.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Then, ργ i (A, ι) − γι(i) = γ � k≥1 (1 − γ)k �� p∈N Ak p,i ι(p) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Players i and j are symmetric, by Lemma 3 we have Ak p,i = Ak p,j, therefore ργ i (A, ι) − γι(i) = γ � k≥1 (1 − γ)k �� p∈N Ak p,j ι(p) � = ργ j (A, ι) − γι(j).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ Since for player i ∈ N the assigned direct responsibility is given by γι(i), as we saw in Prop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' 3, it follows that ργ i (A, ι) − γι(i) can be interpreted as the indirect responsibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Symmetry then implies the notion of fairness that two players that are connected in the same way to identical players are assigned the same amount of indirect responsibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proposition 5 (Efficiency).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1), let A ∈ R and let ι : N → R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' We have � i∈N ργ i (A, ι) = ι(N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' Let γ ∈ (0, 1), let A ∈ R and let ι : N → R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' For all i ∈ N, we have � i∈N ργ i (A, ι) = � i∈N γ � k≥0 (1 − γ)k �� p∈N Ak p,i ι(p) � = γ � k≥0 (1 − γ)k �� p∈N �� i∈N Ak p,i � ι(p) � By Lemma 1, we have that � i∈N Ak p,i = 1, and then � i∈N ργ i (A, ι) = γ � k≥0 (1 − γ)kι(N) = ι(N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YNE0T4oBgHgl3EQf3wIQ/content/2301.02728v1.pdf'} +page_content=' □ Efficiency ensures that all the units of emissions are assigned to some player and prevents double counting.' 
To summarize, besides always taking on finite coordinates, our responsibility value ρ^γ always satisfies additivity, symmetry, the independent player property and efficiency. Note that these properties resemble the axioms that Shapley used to define his Shapley value [2] in cooperative game theory.

3. Approximate solutions and numerical computation

3.1. ε-approximations. With the responsibility value, we aim to provide a practical tool. In the sequel, we show that, despite being an infinite sum, a precise approximation for the responsibility value can be reached with just a few computations.

Let γ ∈ (0, 1), let A ∈ R, let ι : N → R and let i ∈ N. Denote by ρ^γ(A, ι)|_q the truncated responsibility value, defined, for all i ∈ N, by

ρ^γ_i(A, ι)|_q = γ Σ_{k=0}^{q} (1 − γ)^k ( Σ_{p∈N} A^k_{p,i} ι(p) ).

The value ρ^γ(A, ι)|_q requires only a limited amount of calculations to be computed, mainly q − 1 matrix multiplications. Given ε > 0, we define an ε-approximation as any r ∈ R^n such that ∥ρ^γ(A, ι) − r∥_∞ ≤ ε, with ∥·∥_∞ the L^∞-norm of R^n. We denote by q_ε the smallest integer such that ρ^γ(A, ι)|_{q_ε} is an ε-approximation, i.e.,

q_ε := min { q ∈ N | ∀i ∈ N, |ρ^γ_i(A, ι) − ρ^γ_i(A, ι)|_q| ≤ ε }.
By Lemma 1, for all i ∈ N we have Σ_{p∈N} A^k_{p,i} ι(p) ≤ ι(N), and then

∥ρ^γ(A, ι) − ρ^γ(A, ι)|_q∥_∞ ≤ γ ι(N) Σ_{k≥q+1} (1 − γ)^k.

Using the formulae of geometric series and partial geometric series, we have

Σ_{k>q} (1 − γ)^k = Σ_{k≥0} (1 − γ)^k − Σ_{k=0}^{q} (1 − γ)^k = 1/γ − (1 − (1 − γ)^{q+1})/γ = (1 − γ)^{q+1}/γ,

and then ∥ρ^γ(A, ι) − ρ^γ(A, ι)|_q∥_∞ ≤ ι(N)(1 − γ)^{q+1}. Therefore, q_ε is determined by

(1 − γ)^{q_ε} ≥ ι(N)ε ≥ (1 − γ)^{q_ε + 1}.

Using a computer, it is fairly easy to find q_ε, and then to compute an ε-approximation r := ρ^γ(A, ι)|_{q_ε}. By minimality of q_ε, r is the ε-approximation most efficiently computed, in q_ε + 1 steps requiring q_ε − 1 matrix multiplications.

3.2. Numerical computation. For convenience, we denote by ⃗ι the n-dimensional vector defined, for all i ∈ N, by ⃗ι_i = ι(i). The formula for the responsibility value becomes ρ^γ_i(A, ι) = γ Σ_{k≥0} (1 − γ)^k ⟨c_i(A^k), ⃗ι⟩. Denote by P^q_γ(A) the matrix derived from A defined by

P^q_γ(A) := Σ_{k=0}^{q} ((1 − γ)A)^k.

This new matrix only requires q − 1 matrix multiplications to be performed. Rearranging some terms, we find a highly efficient formula to calculate approximations of the responsibility value for all the players simultaneously:

ρ^γ(A, ι)|_q = γ ⃗ι · P^q_γ(A).
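The truncated formula above translates directly into a few lines of linear algebra. The following is a minimal sketch assuming NumPy; the function names, the row-vector convention for ⃗ι, and the stopping rule taken from the displayed truncation criterion are illustrative choices, not part of the paper.

import numpy as np

def truncated_responsibility(A, iota, gamma, q):
    """rho^gamma(A, iota)|_q = gamma * iota_vec . P_q, where
    P_q = sum_{k=0..q} ((1 - gamma) * A)^k  (illustrative names)."""
    n = A.shape[0]
    P = np.zeros((n, n))
    M = np.eye(n)                      # ((1 - gamma) * A)^0
    for _ in range(q + 1):
        P += M
        M = M @ ((1 - gamma) * A)      # next term of the sum
    return gamma * iota @ P            # one value per player

def smallest_q(gamma, iota_total, eps):
    """Smallest q with (1 - gamma)^(q+1) <= iota(N) * eps,
    following the truncation criterion displayed above."""
    q = 0
    while (1 - gamma) ** (q + 1) > iota_total * eps:
        q += 1
    return q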
A =
( 0.1  0  0.1  0.8 )
( 0.2  0  0.2  0.6 )
( 0.1  0  0.1  0.8 )
( 0.5  0  0.5  0   )

Figure 1. Adjacency matrix A and its corresponding digraph G_A.

3.3. Example. Let γ = 0.6, let N = {1, 2, 3, 4}, let ι(i) = i for all i ∈ N, and let A be the adjacency matrix defined in Figure 1.
Notice that, in this case, players 1 and 3 are symmetric, and player 2 is an independent player. By Prop. 3, we have that ρ^{0.6}_2(A, ι) = 0.6 · ι(2) = 1.2. It means that player 2, as part of the whole group N, is responsible for an impact of 1.2 among the global impact of ι(N) = 10.

Let ε := 10^{-3}. To obtain an ε-approximation of ρ^{0.6}(A, ι), we have to find q_ε, the first integer for which (0.4)^{q_ε + 1} is smaller than or equal to ι(N) · ε = 0.01, i.e., (0.4)^{q_ε} ≥ 0.01 ≥ (0.4)^{q_ε + 1}, that is

q        1     2      3      4       5        6
(0.4)^q  0.4   0.16   0.064  0.0256  0.01024  0.004096
We find q_ε = 5. Then, we need to compute the matrix P^5_{0.6}(A), which is approximately

P^5_{0.6}(A) ≃
( 1.129  0  0.129  0.401 )
( 0.160  1  0.160  0.341 )
( 0.129  0  1.129  0.401 )
( 0.251  0  0.251  1.158 )
We can now finish the calculations:

ρ^{0.6}(A, ι) ≃ ( 0.6  1.2  1.8  2.4 ) · P^5_{0.6}(A) = ( 1.704  1.2  2.904  4.151 ).

We indeed obtained the right value for player 2. We see that player 2 and player 3 carry less responsibility than their direct impact, while player 1 and player 4 are held responsible for more than their direct impacts. Moreover,

ρ^{0.6}_1(A, ι) − γι(1) ≃ 1.704 − 0.6 = 1.104,
ρ^{0.6}_3(A, ι) − γι(3) ≃ 2.904 − 1.8 = 1.104,

satisfying the symmetry property.
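As a quick check, the sketch given after Section 3.2 above reproduces these numbers; the snippet below, again assuming NumPy and reusing the illustrative helpers truncated_responsibility and smallest_q, should print q = 5 and values close to (1.704, 1.2, 2.904, 4.151). Note that the truncated values sum to γ ι(N) Σ_{k=0}^{5} (0.4)^k = ι(N)(1 − 0.4^6) ≈ 9.959, which approaches ι(N) = 10 as q grows, consistent with the efficiency property.

import numpy as np

# Figure 1 data: gamma = 0.6, iota(i) = i, and the adjacency matrix A.
A = np.array([[0.1, 0.0, 0.1, 0.8],
              [0.2, 0.0, 0.2, 0.6],
              [0.1, 0.0, 0.1, 0.8],
              [0.5, 0.0, 0.5, 0.0]])
iota = np.array([1.0, 2.0, 3.0, 4.0])
gamma, eps = 0.6, 1e-3

q = smallest_q(gamma, iota.sum(), eps)            # 5, as in the text
rho = truncated_responsibility(A, iota, gamma, q)
print(q, np.round(rho, 3))                        # roughly [1.704 1.2 2.904 4.151]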
4. The parameter γ

The parameter γ ∈ (0, 1) plays an important role in the responsibility measure and determines how much emphasis is put on direct responsibility with respect to indirect responsibility. Recall that for player i ∈ N with direct impact ι(i), the direct responsibility a player has is given by γι(i), to which we subsequently add the indirect responsibility.

To get some intuition for the parameter γ, consider the following example in Figure 2, which consists of N = {1, 2}, A′_{1,2} = A′_{2,2} = 1 and A′_{1,1} = A′_{2,1} = 0. Moreover, the impacts are given by ι(1) = 1 and ι(2) = 0. In words, player 1 sells all of her output to player 2, who in return consumes all her output herself. Since ι(2) = 0, the activities of player 2 do not result in any impact. Then, let us evaluate the assigned responsibilities. For player 1, we obtain ρ^γ_1(A′, ι) = γι(1) = γ, while for player 2, we find ρ^γ_2(A′, ι) = (1 − γ)ι(1) + ι(2) = 1 − γ.

A′ =
( 0  1 )
( 0  1 )

Figure 2. Adjacency matrix A′ and its corresponding digraph G_{A′}.

Observe that when γ < 1/2, player 2 carries more responsibility for the impact of player 1 than player 1 itself.
In fact, even though its own impact is zero, player 2 has a greater total responsibility than player 1, who does carry direct responsibility. On the one hand, if one believes that a player should be responsible for its own actions, it could therefore be argued that a value of γ < 1/2 is unreasonable. On the other hand, one could argue that γ < 1/2 is legitimate since it reflects the power or leverage a player has. In the current example, player 2 is the only player to whom player 1 supplies, making player 2 the reason why player 1 has the impacts. If player 2 were to cease buying from player 1, player 1 would, in the given situation, no longer have any incentive to keep producing the impacts. Thus, the power that player 2 has is reflected in this high responsibility value. These are moral or political questions to which it is not easy to find a universal answer. In our model, one can adapt the value of γ in order to be in line with the point of view on these dilemmas one chooses to take.

5. Conclusions

As a response to the increasing need to assign responsibility to a player for impacts that take place elsewhere in the value chain, whether such impacts are negative impacts like the emission of greenhouse gases or the underpayment of employees, or positive impacts such as the transition to renewable energies, we developed a method to assign direct as well as indirect responsibility to players, based on the underlying graph and the direct impacts of every player. Since this responsibility value is shown to satisfy a set of desirable properties, it can be applied in many situations and on many different scales, as players can be interpreted as either countries, firms or individuals. For example, if we consider the impacts to be carbon emissions, the responsibility value could play a major role in achieving emission reduction efforts.
References

[1] Schneider, L., Kollmuss, A., & Lazarus, M. (2015). Addressing the risk of double counting emission reductions under the UNFCCC. Climatic Change, 131(4), 473–486.
[2] Shapley, L. S. (2016). 17. A value for n-person games. In Contributions to the Theory of Games (AM-28), Volume II (pp. 307–318). Princeton University Press.
[3] World Resources Institute and World Business Council on Sustainable Development (2004). The Greenhouse Gas Protocol. World Resources Institute, Washington, D.C.
Centre d'Économie de la Sorbonne, Université Paris I Panthéon-Sorbonne, 106-112 boulevard de l'Hôpital, 75013, Paris, France & University of Bielefeld, Universitätsstrasse 25, 33615 Bielefeld, Germany
Email address: rosa.ende@gmail.com

Centre d'Économie de la Sorbonne, Université Paris I Panthéon-Sorbonne, 106-112 boulevard de l'Hôpital, 75013, Paris, France
Email address: dylan.laplace.mermoud@gmail.com
diff --git a/YdFLT4oBgHgl3EQfVC8l/content/tmp_files/2301.12051v1.pdf.txt b/YdFLT4oBgHgl3EQfVC8l/content/tmp_files/2301.12051v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b79f83e1ca27060ae5e4da2835ec49a5c1af99bb
--- /dev/null
+++ b/YdFLT4oBgHgl3EQfVC8l/content/tmp_files/2301.12051v1.pdf.txt
@@ -0,0 +1,450 @@
Predicting Students' Exam Scores Using Physiological Signals

Willie Kang∗1,2, Sean Kim∗1,3, Eliot Yoo∗1,4, and Samuel Kim1
1 IF Research Lab, La Palma, CA, USA
2 El Toro High School, Lake Forest, CA, USA
3 Oxford Academy, Cypress, CA, USA
4 Cypress High School, Cypress, CA, USA
{wildmanwillie25, seankim.hahjean, philliot1304}@gmail.com, sam@ifresearchlab.com
∗ These authors contributed equally. The names are listed in alphabetical order.

Abstract— While acute stress has been shown to have both positive and negative effects on performance, not much is known about the impacts of stress on students' grades during examinations. To answer this question, we examined whether a correlation could be found between physiological stress signals and exam performance. We conducted this study using multiple physiological signals of ten undergraduate students over three different exams. The study focused on three signals, i.e., skin temperature, heart rate, and electrodermal activity. We extracted statistics as features and fed them into a variety of binary classifiers to predict relatively higher or lower grades. Experimental results showed up to 0.81 ROC-AUC with the k-nearest neighbor algorithm among various machine learning algorithms.

I. INTRODUCTION

College students are prone to stress due to the highly transitional and demanding nature of their lives, which may be because of rigorous academic requirements, an unfamiliar environment, and separation from home. Academic stress is a regular part of the lives of students, and may result from pressures to perform, perceptions of workloads and exams, and time restraints [1]. Failure to cope with such high stress can lead to various negative effects. Severe academic stress decreases academic performance and hinders the ability to study effectively [2], [3]. Overall, stress has been shown to negatively impact sleep quality, well-being, and affectivity, which in turn negatively impacts general health [4].

Additionally, students may experience more severe issues during examination season. This period is often marked by high stress and anticipation, with numerous important projects, papers, and exams all colliding.
During this time, sleep quality has been shown to decrease and caffeine consumption has been shown to increase [5], [6].

Students are also adversely impacted by test anxiety. Higher levels of cognitive test anxiety have been associated with significantly lower test scores [7]. A study of nursing students has also shown that test anxiety causes physical, emotional, and cognitive detriments, which hinders academic success [8]. There also exists an inverse relationship between test anxiety and grade point average in both graduate and undergraduate students [9].

Exam stress and anxiety is a significant problem that affects all students. Working on this issue can lead to not only academic improvements, but also physical and mental health benefits. Being able to predict exam performance through common physiological signals that correlate with stress can serve as a useful tool to help address the issue of test anxiety. Therefore, this study aims to look at the viability of predicting exam scores with physiological signals using machine learning algorithms.

II. PROCEDURE

A. Data Source

The data we used was collected from a study conducted at the University of Houston on eleven undergraduate students (nine males, two females) who were tracked across three major exams: two midterms and a final exam [10]. The students wore E4 wristbands that measured skin conductance, electrodermal activity (EDA), heart rate, blood volume pulse, skin surface temperature, inter-beat interval, and accelerometer data.

arXiv:2301.12051v1 [cs.LG] 28 Jan 2023

Fig. 1: Physiological signals of the individual students during exams. (a) Skin temperature - Midterm 1, (b) Skin temperature - Midterm 2, (c) Skin temperature - Final, (d) Heart Rate - Midterm 1, (e) Heart Rate - Midterm 2, (f) Heart Rate - Final, (g) Electrodermal activity - Midterm 1, (h) Electrodermal activity - Midterm 2, (i) Electrodermal activity - Final.

Of the eleven participants, one student was provided additional accommodations due to the University of Houston disability accommodation guidelines. Data from this participant was discarded as it involved a factor not consistent with the other participants. See [11] for more details.

For our research, we chose to incorporate skin temperature, heart rate, and EDA measurements. Figure 1 shows the selected physiological signals of individual students collected during different examinations.

B. Pre-Processing

Firstly, we synchronized all the measurements so that they are aligned at the same timestamps. Since the data was collected in an asynchronous manner, we dropped any measurements that fall outside of the common time periods.

Secondly, we found some outliers and missing values in the measurements. Therefore, we applied a filtering method, a moving average low-pass filter to be specific, to remove possible noise and outliers.

Lastly, the physiological signals can be influenced by personal biases and environmental factors. For example, individual skin temperatures can be influenced by the room temperature, and some students can have innately higher heart rates than the others. To mitigate these biases, we normalized the data before inputting it into the machine learning algorithms. The normalization was done both on a student level and a test level. We used z-normalization so that individual instruments have zero means and unit standard deviations, i.e.,

x̄(t) = (x(t) − µ) / σ     (1)

where x(t) represents the measured value of the instrument at time t, and µ and σ denote the average and the standard deviation of the measurement over time, respectively.
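The per-student, per-exam z-normalization described above is straightforward to express in code. The following is a minimal sketch assuming pandas and a long-format table with columns student, exam, signal, and value; these column names are assumptions for illustration, not from the paper.

import pandas as pd

def z_normalize(df: pd.DataFrame) -> pd.DataFrame:
    """Z-normalize each signal separately for every (student, exam) pair,
    so that every instrument has zero mean and unit standard deviation."""
    def _z(x: pd.Series) -> pd.Series:
        return (x - x.mean()) / x.std()

    out = df.copy()
    out["value"] = df.groupby(["student", "exam", "signal"])["value"].transform(_z)
    return out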
Fig. 2: Basic diagram of each validation step for the one-student-leave-out experimental setup.

C. Feature Extraction

As described earlier, we used the skin temperature, heart rate, and EDA of the students. After the pre-processing, we extracted the statistics of a physiological signal as a feature vector for that instrument during an exam. The statistics consist of mean, standard deviation, minimum, maximum, and median (the feature dimension is 5). Then, we concatenate all the features to create one super-vector to represent the overall physiological behaviors during the exam (the dimension of the super-vector is 15).

Since one student takes three different exams, i.e. two midterms and one final, each student will have three different physiological behavior features and corresponding test scores.

III. EXPERIMENTS

A. Experimental Setup

We used all the features regardless of exam type so that each student has three different scores and corresponding physiological features. The train and test sets were split in a one-student-leave-out way, which means nine students would be used to train the classifier and the other student would be used to test it. This creates 10-fold cross-validation, and each validation task consists of 27 training samples and 3 test samples. Figure 2 illustrates this scenario as a simple diagram.

We designed the experiments as binary classification tasks. In this regard, we built models to classify whether students received a score higher than 80. We repeated the experiments 10 times so that we can get the average performance of the individual machine learning algorithms.

B. Classifiers

Multiple machine learning models were used. Using a diverse set of classifiers allows the various algorithms to search for a correlation between the stress signal values and the performance of the student. These machine learning models were the Random Forest (RF, with a grid-search technique for the best parameters in each validation task), Stochastic Gradient Descent (SGD, with log loss), Support Vector Machine (SVM, with RBF kernel and C = 1), and k-nearest neighbor (KNN, with k = 5) classifiers.
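The feature extraction and the one-student-leave-out evaluation described above can be sketched with scikit-learn. This is a minimal illustration under assumptions (a feature matrix X with one 15-dimensional row per student-exam pair, binary labels y for scores above 80, and a groups array of student IDs); it is not the authors' code, and only the hyperparameters named in the text are set.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.metrics import roc_auc_score

def summary_features(signal: np.ndarray) -> np.ndarray:
    """Five summary statistics for one physiological signal during one exam."""
    return np.array([signal.mean(), signal.std(), signal.min(),
                     signal.max(), np.median(signal)])

def loso_auc(clf, X, y, groups):
    """One-student-leave-out evaluation: train on nine students, test on one,
    then score the pooled held-out predictions with ROC-AUC."""
    scores, labels = [], []
    for train_idx, test_idx in LeaveOneGroupOut().split(X, y, groups):
        clf.fit(X[train_idx], y[train_idx])
        if hasattr(clf, "predict_proba"):
            s = clf.predict_proba(X[test_idx])[:, 1]
        else:
            s = clf.decision_function(X[test_idx])
        scores.extend(s)
        labels.extend(y[test_idx])
    return roc_auc_score(labels, scores)

classifiers = {
    # the paper additionally grid-searches RF parameters per validation task
    "RF": RandomForestClassifier(),
    "SGD": SGDClassifier(loss="log_loss"),   # use loss="log" on older scikit-learn
    "SVM": SVC(kernel="rbf", C=1, probability=True),
    "KNN": KNeighborsClassifier(n_neighbors=5),
}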
C. Results

Figure 3 and Table I show the results of the binary classification tasks in terms of ROC-AUC using the various machine learning algorithms. Overall, KNN gave the best results, with a 0.81 ROC-AUC on average for the relationship between stress levels and high scoring on exams. This classifier shows that there exists a correlation between stress and test scores that could be further investigated to find a stronger relation on how stress levels can affect the performance of a student.

The SVM classifier produced the second-best results, with a 0.80 ROC-AUC for the relationship between stress and exam scores. This further shows that there is a considerable correlation between stress and scores.

On the other hand, RF and SGD did not yield sufficient ROC-AUC scores, which indicates that those machine learning algorithms are not performing well enough to model the relationship between physiological behaviors and test scores.

D. Limitations

One limitation of our study is the small number of statistics extracted from the chosen physiological signals during feature extraction. We only utilized basic statistics as features. Using more comprehensive features may serve to better map the physiological signals to the exam scores. Furthermore, analyzing a larger dataset may help improve the accuracy of results.

Fig. 3: ROC curves with various machine learning algorithms.

TABLE I: Average ROC-AUC scores (standard deviation) with various machine learning algorithms.

          RF      SGD     SVM     KNN
ROC-AUC   0.54    0.56    0.80    0.81
          (0.09)  (0.06)  (0.06)  (0.00)

IV. CONCLUSION

The present research examined how stress affects academic performance through physiological signals. The results of this study support the initial hypothesis, suggesting a correlation between stress and exam results. These preliminary results have multiple implications for future research and further developments in the field. By looking at stress measurements, we can formulate strategies to maximize academic performance by looking at optimal levels of stress. Additionally, we can identify which factors have the greatest impact on stress and academic performance. Certain physiological signals may have detrimental effects on performance as they increase, while others may function in the inverse. Measurements that have the greatest impact on academic performance can be further investigated through various research and testing. The hypothesis attempted to form a general correlation based on the limited data and information available, but the opportunities for further improvement and different program creation are abundant. We expect future works to use this research as a foundation for more elaboration and growth within the fields of academics and stress.

REFERENCES

[1] Dalia Bedewy and Adel Gabriel, "Examining perceptions of academic stress and its sources among university students: The perception of academic stress scale," Health Psychology Open, vol. 2, no. 2, pp. 205510291559671, 2015.
[2] Mussarat Khan, "Effect of perceived academic stress on students' performance," FWU Journal of Social Sciences, 08 2018.
[3] Nudrat Sohail, "Stress and academic performance among medical students," Journal of the College of Physicians and Surgeons–Pakistan: JCPSP, vol. 23, pp. 67–71, 01 2013.
[4] Kathrin Wunsch, Nadine Kasten, and Reinhard Fuchs, "The effect of physical activity on sleep quality, well-being, and affect in academic stress periods," Nature and Science of Sleep, vol. 9, pp. 117–126, 2017.
[5] Matthias Zunhammer, Peter Eichhammer, and Volker Busch, "Sleep quality during exam stress: The role of alcohol, caffeine and nicotine," PLoS ONE, vol. 9, no. 10, 2014.
[6] Rachel Campbell, Bart Soenens, Wim Beyers, and Maarten Vansteenkiste, "University students' sleep during an exam period: The role of basic psychological needs and stress," Motivation and Emotion, vol. 42, no. 5, pp. 671–681, 2018.
[7] Jerrell C. Cassady and Ronald E. Johnson, "Cognitive test anxiety and academic performance," Contemporary Educational Psychology, vol. 27, no. 2, pp. 270–295, 2002.
[8] April L. Shapiro, "Test anxiety among nursing students: A systematic review," Teaching and Learning in Nursing, vol. 9, no. 4, pp. 193–202, 2014.
[9] Mark S. Chapell, Z. Benjamin Blanding, Michael E. Silverstein, Masami Takahashi, Brian Newman, Aaron Gubi, and Nicole McCann, "Test anxiety and academic performance in undergraduate and graduate students," Journal of Educational Psychology, vol. 97, no. 2, pp. 268–274, 2005.
[10] M. R. Amin, D. Wickramasuriya, and R. T. Faghih, "A wearable exam stress dataset for predicting grades using physiological signals," 2022 IEEE Healthcare Innovations and Point of Care Technologies (HI-POCT), 2022.
[11] M. R. Amin, D. Wickramasuriya, and R. T. Faghih, "A wearable exam stress dataset for predicting cognitive performance in real-world settings," PhysioNet, 2022.
\ No newline at end of file
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' We conducted this study using multiple physiological signals of ten undergraduate students over three different exams.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' The study focused on three signals, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=', skin temperature, heart rate, and electrodermal activity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' We extracted statistics as features and fed them into a variety of binary classifiers to predict relatively higher or lower grades.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' Experimental results showed up to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content='81 ROC-AUC with k-nearest neighbor algorithm among various machine learning algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' INTRODUCTION College students are prone to stress due to the highly transitional and demanding nature of their lives, which may be because of rigorous academic requirements, an unfamiliar environment, and separation from home.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' Academic stress is a regular part of the lives of students, and may result from pressures to perform, perceptions of workloads and exams, and time restraints [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' Failure to cope with such high stress can lead to various nega- tive effects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' Severe academic stress decreases academic performance and hinders the ability to study effectively [2], [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' Overall, stress has been shown to negatively impact sleep quality, well being, and affectivity, which in turn negatively impacts general health [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' Additionally, students may experience more severe issues during examination season.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/YdFLT4oBgHgl3EQfVC8l/content/2301.12051v1.pdf'} +page_content=' This period is often marked by high stress and anticipation, with numerous ∗ These authors contributed equally.' 
The names are listed in an alphabetical order.
important projects, papers, and exams all colliding. During this time, sleep quality has been shown to decrease and caffeine consumption has been shown to increase [5], [6]. Students are also adversely impacted by test anxiety. Higher levels of cognitive test anxiety have been associated with significantly lower test scores [7]. A study of nursing students has also shown that test anxiety causes physical, emotional, and cognitive detriments, which hinder academic success [8]. There also exists an inverse relationship between test anxiety and grade point average in both graduate and undergraduate students [9]. Exam stress and anxiety are a significant problem that affects all students. Working on this issue can lead not only to academic improvements, but also to physical and mental health benefits. Being able to predict exam performance through common physiological signals that correlate with stress can serve as a useful tool to help address the issue of test anxiety. Therefore, this study aims to assess the viability of predicting exam scores from physiological signals using machine learning algorithms.
II. PROCEDURE
A. Data Source
The data we used was collected from a study conducted at the University of Houston on eleven undergraduate students (nine males, two females) who were tracked across three major exams: two midterms and a final exam [10]. The students wore E4 wristbands that measured skin conductance, electrodermal activity (EDA), heart rate, blood volume pulse, skin surface temperature, inter-beat interval, and accelerometer data.
arXiv:2301.12051v1 [cs.LG] 28 Jan 2023
Fig. 1: Physiological signals of the individual students during exams. Panels: (a)-(c) skin temperature, (d)-(f) heart rate, (g)-(i) electrodermal activity, each for Midterm 1, Midterm 2, and the Final.
Of the eleven participants, one student was provided additional accommodations due to the University of Houston disability accommodation guidelines. Data from this participant was discarded as it involved a factor not consistent with the other participants. See [11] for more details. For our research, we chose to incorporate the skin temperature, heart rate, and EDA measurements. Figure 1 shows the selected physiological signals of individual students collected during the different examinations.
B. Pre-Processing
First, we synchronized all the measurements to a common set of timestamps. Since the data was collected asynchronously, we dropped any measurements that fall outside the common time periods. Second, we found some outliers and missing values in the measurements; we therefore applied a moving-average low-pass filter to remove possible noise and outliers. Lastly, the physiological signals can be influenced by personal biases and environmental factors. For example, individual skin temperatures can be influenced by the room temperature, and some students can have innately higher heart rates than others. To mitigate these biases, we normalized the data before feeding it into the machine learning algorithms. The normalization was done at both the student level and the test level. We used z-normalization so that individual instruments have zero mean and unit standard deviation, i.e.,
x(t) = (x(t) - µ) / σ,   (1)
where x(t) is the measured value of the instrument at time t, and µ and σ denote the average and the standard deviation of the measurement over time, respectively.
[Fig. 1 plots: per-student traces over time (sec) of skin temperature, heart rate, and electrodermal activity for each exam; the numeric axis data is not recoverable from the extraction.]
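To make the pre-processing concrete, a minimal Python sketch is given below. The column names, file name, and window length are illustrative assumptions rather than details taken from the study.

import pandas as pd

def preprocess(signal: pd.Series, window: int = 5) -> pd.Series:
    """Clean one physiological signal of one student during one exam:
    moving-average low-pass filtering followed by z-normalization (Eq. 1)."""
    # Moving-average low-pass filter to suppress noise and outliers.
    smoothed = signal.rolling(window=window, center=True, min_periods=1).mean()
    # Z-normalization: zero mean and unit standard deviation per instrument.
    return (smoothed - smoothed.mean()) / smoothed.std()

# Usage sketch: signals are first aligned on a common time index, then each
# column is filtered and normalized independently per student and per exam.
# df = pd.read_csv("student01_midterm1.csv", index_col="timestamp")  # hypothetical file
# clean = df[["TEMP", "HR", "EDA"]].apply(preprocess)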
Fig. 2: Basic diagram of each validation step for the one-student-leave-out experimental setup.
C. Feature Extraction
As described earlier, we used the skin temperature, heart rate, and EDA of the students.
After the pre-processing, we extracted the statistics of each physiological signal as a feature vector for that instrument during an exam. The statistics consist of the mean, standard deviation, minimum, maximum, and median (the feature dimension is 5). Then, we concatenate all the features to create one super-vector that represents the overall physiological behavior during the exam (the dimension of the super-vector is 15). Since one student takes three different exams, i.e., two midterms and one final, each student will have three different physiological behavior features and the corresponding test scores.
III. EXPERIMENTS
A. Experimental Setup
We used all the features regardless of exam type, so that each student has three different scores and the corresponding physiological features. The train and test sets were split in a one-student-leave-out manner, which means nine students are used to train the classifier and the remaining student is used to test it. This creates a 10-fold cross-validation, and each validation task consists of 27 training samples and 3 test samples. Figure 2 illustrates this scenario as a simple diagram. We designed the experiments as binary classification tasks: we built models to classify whether students received a score higher than 80. We repeated the experiments 10 times so that we can report the average performance of the individual machine learning algorithms.
B. Classifiers
Multiple machine learning models were used. Using a diverse set of classifiers allows the various algorithms to search for a correlation between the stress signal values and the performance of the student. These machine learning models were the Random Forest (RF, with a grid search for the best parameters in each validation task), Stochastic Gradient Descent (SGD, with log loss), Support Vector Machine (SVM, with an RBF kernel and C = 1), and k-nearest neighbor (KNN, with k = 5) classifiers.
C. Results
Figure 3 and Table I show the results of the binary classification tasks in terms of ROC-AUC for the various machine learning algorithms. Overall, KNN gave the best results, with a 0.81 ROC-AUC on average for the relationship between stress levels and high exam scores. This classifier shows that there exists a correlation between stress and test scores that could be further investigated to find a stronger relation on how stress levels affect the performance of a student. The SVM classifier produced the second-best results, with a 0.80 ROC-AUC. This further shows that there is a considerable correlation between stress and scores. On the other hand, RF and SGD did not yield sufficient ROC-AUC scores, which indicates that those machine learning algorithms do not model the relationship between physiological behaviors and test scores well enough.
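A minimal sketch of the feature extraction and the one-student-leave-out evaluation described above is shown below. It uses scikit-learn; the pooling of held-out predictions before computing ROC-AUC, the omission of the grid search, and all variable names are simplifying assumptions rather than the authors' exact pipeline.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

def super_vector(temp, hr, eda):
    # 15-dimensional feature vector: five summary statistics per signal, concatenated.
    stats = lambda x: [np.mean(x), np.std(x), np.min(x), np.max(x), np.median(x)]
    return np.concatenate([stats(temp), stats(hr), stats(eda)])

classifiers = {
    "RF": RandomForestClassifier(),                     # grid search omitted in this sketch
    "SGD": SGDClassifier(loss="log_loss"),              # logistic loss
    "SVM": SVC(kernel="rbf", C=1.0, probability=True),
    "KNN": KNeighborsClassifier(n_neighbors=5),
}

def evaluate(X, y, groups):
    # X: (n_samples, 15) array of super-vectors, y: binary "score > 80" labels,
    # groups: student id per sample, so LeaveOneGroupOut reproduces the
    # one-student-leave-out split. Held-out predictions are pooled and a
    # single ROC-AUC is computed per classifier.
    results = {}
    for name, clf in classifiers.items():
        truths, scores = [], []
        for train, test in LeaveOneGroupOut().split(X, y, groups):
            clf.fit(X[train], y[train])
            scores.extend(clf.predict_proba(X[test])[:, 1])
            truths.extend(y[test])
        results[name] = roc_auc_score(truths, scores)
    return results

Pooling the held-out predictions before scoring is one simple way to cope with the tiny three-sample test folds; the paper's exact aggregation may differ.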
D. Limitations
One limitation of our study is the small number of statistics extracted from the chosen physiological signals during feature extraction. We only utilized basic statistics as features; using more comprehensive features may serve to better map the physiological signals to the exam scores. Furthermore, analyzing a larger dataset may help improve the accuracy of the results.
[Fig. 2 diagram: the one-student-leave-out training set of (physiological behaviors, test score) pairs is used for model training, and the held-out student's (physiological behaviors, test score) pair is used for evaluation by the trained classifier.]
Fig. 3: ROC curves with various machine learning algorithms.
          RF      SGD     SVM     KNN
ROC-AUC   0.54    0.56    0.80    0.81
          (0.09)  (0.06)  (0.06)  (0.00)
TABLE I: Average ROC-AUC scores (standard deviation) with various machine learning algorithms.
IV. CONCLUSION
The present research examined how stress affects academic performance through physiological signals. The results of this study support the initial hypothesis, suggesting a correlation between stress and exam results.
These preliminary results have multiple implications for future research and further developments in the field. By looking at stress measurements, we can formulate strategies to maximize academic performance by targeting optimal levels of stress. Additionally, we can identify which factors have the greatest impact on stress and academic performance. Certain physiological signals may have detrimental effects on performance as they increase, while others may function in the inverse. Measurements that have the greatest impact on academic performance can be further investigated through additional research and testing. The hypothesis attempted to form a general correlation based on the limited data and information available, but the opportunities for further improvement and the creation of different programs are abundant. We expect future works to use this research as a foundation for more elaboration and growth within the fields of academics and stress.
REFERENCES
[1] Dalia Bedewy and Adel Gabriel, "Examining perceptions of academic stress and its sources among university students: The perception of academic stress scale," Health Psychology Open, vol. 2, no. 2, pp. 205510291559671, 2015.
[2] Mussarat Khan, "Effect of perceived academic stress on students' performance," FWU Journal of Social Sciences, 08 2018.
[3] Nudrat Sohail, "Stress and academic performance among medical students," Journal of the College of Physicians and Surgeons Pakistan (JCPSP), vol. 23, pp. 67-71, 01 2013.
[4] Kathrin Wunsch, Nadine Kasten, and Reinhard Fuchs, "The effect of physical activity on sleep quality, well-being, and affect in academic stress periods," Nature and Science of Sleep, vol. 9, pp. 117-126, 2017.
[5] Matthias Zunhammer, Peter Eichhammer, and Volker Busch, "Sleep quality during exam stress: The role of alcohol, caffeine and nicotine," PLoS ONE, vol. 9, no. 10, 2014.
[6] Rachel Campbell, Bart Soenens, Wim Beyers, and Maarten Vansteenkiste, "University students' sleep during an exam period: The role of basic psychological needs and stress," Motivation and Emotion, vol. 42, no. 5, pp. 671-681, 2018.
[7] Jerrell C. Cassady and Ronald E. Johnson, "Cognitive test anxiety and academic performance," Contemporary Educational Psychology, vol. 27, no. 2, pp. 270-295, 2002.
[8] April L. Shapiro, "Test anxiety among nursing students: A systematic review," Teaching and Learning in Nursing, vol. 9, no. 4, pp. 193-202, 2014.
[9] Mark S. Chapell, Z. Benjamin Blanding, Michael E. Silverstein, Masami Takahashi, Brian Newman, Aaron Gubi, and Nicole McCann, "Test anxiety and academic performance in undergraduate and graduate students," Journal of Educational Psychology, vol. 97, no. 2, pp. 268-274, 2005.
[10] M. R. Amin, D. Wickramasuriya, and R. T. Faghih, "A wearable exam stress dataset for predicting grades using physiological signals," 2022 IEEE Healthcare Innovations and Point of Care Technologies (HI-POCT), 2022.
[11] M. R. Amin, D. Wickramasuriya, and R. T. Faghih, "A wearable exam stress dataset for predicting cognitive performance in real-world settings," PhysioNet, 2022.
[Fig. 3 plot: ROC curves (true positive rate vs. false positive rate) for the Random Forest, Stochastic Gradient Descent, Support Vector, and K-Nearest Neighbors classifiers.]
diff --git a/ZNE2T4oBgHgl3EQfZQdS/content/tmp_files/2301.03862v1.pdf.txt b/ZNE2T4oBgHgl3EQfZQdS/content/tmp_files/2301.03862v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..df2009743343133887b86f881eb5a020dd6fa8f5
--- /dev/null
+++ b/ZNE2T4oBgHgl3EQfZQdS/content/tmp_files/2301.03862v1.pdf.txt
@@ -0,0 +1,733 @@
Proportionally Fair Matching with Multiple Groups
Sayan Bandyapadhyay∗
Fedor V. Fomin†
Tanmay Inamdar‡
Kirill Simonov§
Abstract
The study of fair algorithms has become mainstream in machine learning and artificial intelligence due to its increasing demand in dealing with biases and discrimination. Along this line, researchers have considered fair versions of traditional optimization problems including clustering, regression, ranking and voting. However, most of the efforts have been channeled into designing heuristic algorithms, which often do not provide any guarantees on the quality of the solution. In this work, we study matching problems with the notion of proportional fairness. Proportional fairness is one of the most popular notions of group fairness where every group is represented up to an extent proportional to the final selection size.
Matching with proportional fairness, or more commonly proportionally fair matching, was introduced in [Chierichetti et al., AISTATS, 2019], where the problem was studied with only two groups. However, in many practical applications, the number of groups—although often a small constant—is larger than two. In this work, we make the first step towards understanding the computational complexity of proportionally fair matching with more than two groups. We design exact and approximation algorithms achieving reasonable guarantees on the quality of the matching as well as on the time complexity. Our algorithms are also supported by suitable hardness bounds.
1 Introduction
Machine learning (ML) algorithms are ubiquitous in today's world, constantly playing crucial roles in decision-making that has an immeasurable impact on human lives. These algorithms, trained on past instances, are extremely powerful and most of the time output correct solutions without making any error. However, in recent times, these algorithms have faced critiques for being biased towards underrepresented groups [4, 21, 27, 10]. Consequently, researchers have made efforts to understand how biases are introduced in the ML pipeline and whether it is possible to get rid of them. This research has given rise to an entire subfield called fairness in ML. All the work done so far in this budding subfield can broadly be classified into two types. The first one studies different notions of fairness and their interactions [11, 22, 25, 19, 39, 18]. These works essentially show that there is no universal definition of fairness that captures all the scenarios, and that it is not possible to satisfy different fairness notions simultaneously. In the second type of works, researchers have studied fair versions of classical problems incorporating suitable notions of fairness from the first type. Notably, the problems considered include clustering [15, 32, 7, 5], regression [34, 9, 1], ranking [13], voting [12] and matching [16].
In this paper, we consider the proportionally fair matching problem. Matching appears naturally in several applications in ML, e.g., assigning products to customers [44]; students to schools [40]; reviewers to manuscripts [14]; and workers to firms [2]. There are scores of works that study fair versions of matchings [16, 31, 28, 35, 38, 26]. Among these distinct notions of matchings, our work is most relevant to (α, β)-balanced matching [16]. (α, β)-balanced matching was formulated by [16] by bringing proportional fairness and maximum cardinality matching together. Proportional fairness is based on the concept of disparate impact [25], which in the context of matching is defined as follows. A matching is (α, β)-balanced or proportionally fair if the ratio between the number of edges from each group and the size of the matching is at least α and at most β.
∗Portland State University, USA
†University of Bergen, Norway
‡University of Bergen, Norway
§Technische Universität Wien, Austria
arXiv:2301.03862v1 [cs.DS] 10 Jan 2023
As a motivating example of proportionally fair matching, consider the product recommendation problem in e-commerce. With the advancement of digital marketing and advertising, nowadays companies are interested in more fine-tuned approaches that help them reach the target groups of customers. These groups may be representative of certain underlying demographic categorizations based on gender, age group, geographic location, etc.
Thus, the number of groups is often a +small constant. In particular, in this contemporary setting, one is interested in finding assignments +that involve customers from all target groups and have a balanced impact on all these groups. This +assignment problem can be modeled as the proportionally fair matching problem between customers +and products. In a realistic situation, one might need to assign many products to a customer and +many customers to a product. +This can be achieved by computing multiple matchings in an +iterative manner while removing the edges from the input graph that are already matched. +In a seminal work, [16] obtained a polynomial-time 3/2-approximation when the number of +groups is 2. However, in many real-world situations, like in the above example, it is natural to +assume that the number of target groups is more than 2. Unfortunately, the algorithm of [16] +strongly exploits the fact that the number of groups ℓ = 2. It is not clear how to adapt or extend +their algorithm when we have more than two groups. The only known algorithm prior to our +work for ℓ > 2 groups was an nO(ℓ)-time randomized exact algorithm [16]. The running time of +this algorithm has a “bad” exponential dependence on the number of groups, i.e., the running +time is not a fixed polynomial in n. Thus, this algorithm quickly becomes impractical if ℓ grows. +Our research on proportionally fair matching is driven by the following question. Do there exist +efficient algorithms with guaranteed performance for proportionally fair matching when the number +of groups ℓ is more than two? +1.1 +Our results and contributions +In this work, we obtain several results on the Proportionally Fair Matching problem with +any arbitrary ℓ number of groups. +• First, we show that the problem is extremely hard for any general ℓ number of groups, in the +sense that it is not possible to obtain any approximation algorithm in 2o(ℓ)nO(1) time even +on path graphs, unless the Exponential Time Hypothesis (ETH) [33] is false. +• To complement our hardness result, we design a 1/4ℓ-approximation algorithm that runs in +2O(ℓ)nO(1) time. Our algorithm might violate the lower (α) and upper (β) bounds by at +most a multiplicative factor of (1 + 4ℓ/|OPT|) if |OPT| is more than 4ℓ2, where OPT is +any optimum solution. Thus, the violation factor is at most 1 + 1/ℓ, and tends to 1 with +asymptotic values of |OPT|. +• We also consider a restricted case of the problem, referred to as the β-limited case in [16], +where we only have the upper bound, i.e., no edges might be present from some groups. In +this case, we could improve the approximation factor to 1/2ℓ and running time to polynomial. +• Lastly, we show that the parameterized version of the problem where one seeks for a propor- +tionally fair matching of size k, can be solved exactly in 2O(k)nO(1) time. Thus the problem +is fixed-parameter tractable parameterized by k. +All of our algorithms are based on simple schemes. Our approximation algorithms use an iterative +peeling scheme that in each iteration, extracts a rainbow matching containing at most one edge +2 + +from every group. The exact algorithm is based on a non-trivial application of the celebrated color- +coding scheme [3]. These algorithms appear in Sections 3, 4, and 5, respectively. The hardness +proof is given in Section 6. 
+1.2 +Related work +In recent years, researchers have introduced and studied several different notions of fairness, e.g., +disparate impact [25], statistical parity [47, 36], individual fairness [22] and group fairness [23]. +Kleinberg et al. [39] formulated three notions of fairness and showed that it is theoretically impos- +sible to satisfy them simultaneously. See also [18, 17] for similar exposures. +The notion of proportional fairness with multiple protected groups is widely studied in the +literature, which is based on disparate impact [25]. Bei et al. [6] studied the proportional candidate +selection problem, where the goal is to select a subset of candidates with various attributes from +a given set while satisfying certain proportional fairness constraints. Goel et al. [29] considered +the problem of learning non-discriminatory and proportionally fair classifiers and proposed the +weighted sum of logs technique. Proportional fairness has also been considered in the context of +Federated learning [49]. +Additionally, proportional fairness has been studied in the context of +numerous optimization problems including voting [24], scheduling [37, 41], Kidney exchange [45], +and Traveling Salesman Problem [43]. +Several different fair matching problems have been studied in the literature. [31] studied fair +b-matching, where matching preferences for each vertex are given as ranks, and the goal is to +avoid assigning vertices to high ranked preferences as much as possible. Fair-by-design-matching +is studied in [28], where instead of a single matching, a probability distribution over all feasible +matchings is computed which guarantees individual fairness. See also [35, 38]. +Apart from the fair versions of matchings, many constrained versions are also studied [46, 8]. +[46] studied the Bounded Color Matching (BCM) problem where edges are colored and from each +color class, only a given number of edges can be chosen. BCM is a special case of 3-set packing +and, hence, admits a 3/4-approximation [46]. We note that the β-limited case of Proportionally +Fair Matching is a special case of BCM and, thus, a 3/4-approximation follows in this case where +the upper bound might be violated by 3/4 factor. One should compare this factor with our violation +factor, which asymptotically tends to 1. +2 +Preliminaries +For an integer ℓ ≥ 1, let [ℓ] := {1, 2, . . . , ℓ}. Consider any undirected n-vertex graph G = (V, E) +such that the edges in E are colored by colors in C = {1, . . . , ℓ}. The function χ : E → C describes +the color assignment. For each color c ∈ C, let Ec be the set of edges colored by the color c, i.e., +Ec = χ−1(c). A subset E′ ⊆ E is a matching in G if no two edges in E′ share a common vertex. +Definition 1. (α, β)-balanced matching. Given 0 ≤ α ≤ β ≤ 1, a matching M ⊆ E is called +(α, β)-balanced if for each color c ∈ C, we have that +α ≤ |M ∩ Ec| +|M| +≤ β. +Thus a matching is (α, β)-balanced if it contains at least α and at most β fraction of edges from +every color. In the Proportionally Fair Matching problem, the goal is to find a maximum- +sized (α, β)-balanced matching. In the restricted β-limited case of the problem, α = 0, i.e., we +only have the upper bound. +For γ ≤ 1 and ∆ ≥ 1, a (γ, ∆)-approximation algorithm for Proportionally Fair Matching +computes a matching of size at least γ · |OPT|, where every color appears in at least α/∆ fraction +of the edges and in at most β · ∆ fraction. OPT is an optimum (α, β)-balanced matching. 
+A matching is called a rainbow matching if all of its edges have distinct colors. We will need +the following result due to Gupta et al. [30]. +3 + +Theorem 1. For some integer k > 0, suppose there is a rainbow matching in G of size k. There +is a 2k · nO(1) time algorithm that computes a rainbow matching of size k. +3 +A ( 1 +4ℓ, 1+ +4ℓ +|OPT|)-Approximation for Proportionally Fair Matching +In this section, we design an approximation algorithm for Proportionally Fair Matching. Let +OPT be an optimum (α, β)-balanced matching, OPTc = OPT∩Ec. We design two algorithms: one +for the case when α > 0 and the other for the complementary β-limited case. In this section, we +slightly abuse the notation, and use OPT (resp. OPTc for some color c ∈ C) to refer to |OPT| (resp. +|OPTc|). The intended meaning should be clear from the context; however we will disambiguate +in case there is a possibility of confusion. +First, we consider the α > 0 case. Immediately, we have the following observation. +Observation 1. For any color c ∈ C, OPT contains at least one edge of color c and, hence, G +contains a rainbow matching of size ℓ. +Our algorithm runs in rounds. In the following, we define a round. The input in each round is +a subgraph G′ = (V ′, E′) of G. +Round. Initially M = ∅. For every color 1 ≤ c ≤ ℓ, do the following in an iterative manner. If +there is no edge of color c in G′, go to the next color or terminate and return (G′, M) if c = ℓ. +Otherwise, pick any edge e of color c from G′ and add e to the already computed matching M. +Remove all the edges (including e) from G′ that share a common vertex with e. Repeat the process +for the next color with the current (or updated) graph G′ or terminate and return (G′, M) if c = ℓ. +Thus in each round, we try to pick a rainbow matching in a greedy manner. Next, we describe +our algorithm. The most challenging part of our algorithm is to ensure that the final matching +computed is (α, β)-balanced modulo a small factor, i.e., we need to ensure both the lower and +the upper bounds within a small factor for each color. Note that just the above greedy way of +picking edges might not even ensure that at least one edge is selected from each color. We use the +algorithm of [30] in the beginning to overcome this barrier. However, the rest of our algorithm is +extremely simple. +The Algorithm. We assume that we know the size of OPT. We describe later how to remove +this assumption. Apply the algorithm in Theorem 1 on G to compute a rainbow matching M′ +of size ℓ. If OPT ≤ 4ℓ2, return M := M′ as the solution and terminate. Otherwise, remove all +the edges of M′ and the edges adjacent to them from G to obtain the graph G0. Initialize M to +M′. Greedily pick matched edges in rounds using the Round procedure and add them to M until +exactly ⌈OPT/(4ℓ)⌉ edges are picked in total. In particular, the graph G0 is the input to the 1-st +round and G1 is the output graph of the 1-st round. G1 is the input to the 2-nd round and G2 +is the output graph of the 2-nd round, and so on. Note that it might be the case that the last +round is not completed fully if the size of M is reached to ⌈OPT/(4ℓ)⌉ before the completion of +the round. +Note that the above algorithm is oblivious to α and β in the sense that it never uses these +values. Nevertheless, we prove that the computed matching is (α, β)-balanced modulo a small +factor. Now we analyze our algorithm. +3.1 +The Analysis +Let Mc = M ∩ Ec. Also, let c∗ be a color c ∈ C such that |OPTc| is the minimum at c = c∗. 
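For concreteness, the Round procedure and the peeling loop of this section can be sketched in Python as follows. This is a simplified illustration: the rainbow-matching subroutine of Theorem 1 is abstracted away (its output is passed in as `rainbow`, assumed to be a list of (u, v, color) triples), `opt_guess` stands for the guessed value of |OPT|, and the data structures are assumptions.

import math

def run_round(edges_by_color, used):
    # One Round: for colors 1..l in order, greedily pick one edge whose
    # endpoints are still unmatched, then block its endpoints.
    picked = []
    for color in sorted(edges_by_color):
        for u, v in edges_by_color[color]:
            if u not in used and v not in used:
                picked.append((u, v, color))
                used.update((u, v))
                break
    return picked

def peel(edges_by_color, rainbow, opt_guess, num_colors):
    # Start from the initial rainbow matching of size l, then run Rounds
    # until ceil(opt_guess / (4 * l)) edges have been collected in total,
    # stopping mid-round if the target is reached.
    target = math.ceil(opt_guess / (4 * num_colors))
    matching = list(rainbow)
    used = set()
    for u, v, _ in matching:
        used.update((u, v))
    while len(matching) < target:
        new_edges = run_round(edges_by_color, used)
        if not new_edges:
            break  # every remaining edge touches an already matched vertex
        for e in new_edges:
            if len(matching) < target:
                matching.append(e)
    return matching

The OPT ≤ 4ℓ² base case and the final feasibility check over all guesses of |OPT| are omitted for brevity.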
+Observation 2. α ≤ 1/ℓ ≤ β. +Proof. Let ˆc be a color c ∈ C such that |OPTc| is the minimum at c = ˆc. By definition, OPT +≥ ℓ · OPTc∗, or OPTc∗/OPT ≤ 1/ℓ. Thus, α ≤ OPTc∗/OPT ≤ 1/ℓ. Similarly, OPT ≤ ℓ · OPTˆc, +or OPTˆc/OPT ≥ 1/ℓ. Thus, β ≥ OPTˆc/OPT ≥ 1/ℓ. +4 + +First we consider the case when OPT ≤ 4ℓ2. +In this case the returned matching M is a +rainbow matching of size exactly ℓ. The existence of such a matching follows by Observation 1. +Thus, we immediately obtain a 4ℓ-approximation. As |Mc|/|M| = 1/ℓ in this case, by Observation +2, α ≤ |Mc|/|M| ≤ β. Thus we obtain the desired result. In the rest of the proof, we analyze the +case when OPT > 4ℓ2. We start with the following lemma. +Lemma 1. The algorithm successfully computes a matching of size exactly ⌈OPT/(4ℓ)⌉. Moreover, +for each color c with OPTc > 4ℓ and round i ∈ [1, ⌈OPTc/(4ℓ)⌉ − 1], Gi−1 contains an edge of +color c. +Proof. Note that by Observation 1, the algorithm in Theorem 1 successfully computes a rainbow +matching M′ of size ℓ. Now consider any color c such that OPTc ≤ 4ℓ. For such a color, M +already contains at least 1 ≥ ⌈OPTc/(4ℓ)⌉ edge. Now consider any other color c with |OPTc| > 4ℓ. +Consider the rainbow matching M′ computed in the beginning. As |M′| = ℓ, the edges of M′ +can be adjacent to at most 2ℓ edges from OPT, since it is a matching. In particular, the edges +of M′ can be adjacent to at most 2ℓ edges from the set OPTc. +Hence, G0 contains at least +OPTc − 2ℓ edges of the set OPTc. Now consider the execution of round i ≥ 1. At most ℓ edges +are chosen in this round. Hence, these edges can be adjacent to at most 2ℓ edges of OPTc. It +follows that at most 2ℓ fewer edges of the set OPTc are contained in Gi compared to Gi−1. As G0 +has at least OPTc − 2ℓ edges from the set OPTc of color c and OPTc > 4ℓ, for each of the first +⌈(OPTc−2ℓ)/(2ℓ)⌉ = ⌈OPTc/(2ℓ)⌉−1 rounds, the algorithm will be able to pick an edge of color c. +Thus from such a color c with OPTc > 4ℓ, it can safely pick at least ⌈OPTc/(2ℓ)⌉ ≥ ⌈OPTc/(4ℓ)⌉ +edges in total. Now, as OPT = � +c OPTc, � +c∈C⌈OPTc/(4ℓ)⌉ ≥ ⌈OPT/(4ℓ)⌉. It follows that the +algorithm can pick at least ⌈OPT/(4ℓ)⌉ edges. As we stop the algorithm as soon as the size of M +reaches to ⌈OPT/(4ℓ)⌉, the lemma follows. +Note that the claimed approximation factor trivially follows from the above lemma. Next, we +show that M is (α, β)-balanced modulo a small factor that asymptotically tends to 1 with the size +of OPT. +Lemma 2. For each color c ∈ C, |Mc| ≥ |OPTc∗|/(4ℓ). +Proof. If OPTc∗ ≤ 4ℓ, |Mc| ≥ 1 ≥ OPTc∗/(4ℓ). So, assume that OPTc∗ > 4ℓ. Now suppose |Mc| < +OPTc∗/(4ℓ) for some c. By Lemma 1, for each of the first ⌈OPTc/(4ℓ)⌉ − 1 ≥ ⌈OPTc∗/(4ℓ)⌉ − 1 +rounds, Gi−1 contains an edge of color c. It follows that the algorithm was forcibly terminated in +some round i ≤ (OPTc∗/(4ℓ)) − 1. Thus, the number of edges chosen from each color c′ ̸= c is at +most OPTc∗/(4ℓ). Hence, +|M| = +� +c′̸=c +|Mc′| + |Mc| +< (ℓ − 1) · (OPTc∗/(4ℓ)) + (OPTc∗/(4ℓ)) +≤ ⌈OPT/(4ℓ)⌉. +This contradicts Lemma 1, which states that we select exactly ⌈OPT/(4ℓ)⌉ edges. +Corollary 1. For each color c ∈ C, (|Mc|/|M|) ≥ +α +(1+4ℓ/OPT). +Proof. By Lemma 2, |Mc| ≥ OPTc∗/(4ℓ). +|Mc| +|M| ≥ (OPTc∗/(4ℓ)) +⌈OPT/(4ℓ)⌉ ≥ +(OPTc∗/(4ℓ)) +(OPT/(4ℓ)) + 1 += (OPTc∗)/(OPT) +(1 + 4ℓ/OPT) +≥ +α +(1 + 4ℓ/OPT). +The last inequality follows as OPT satisfies the lower bound for all colors. +5 + +Now we turn to proving the upper bound. Let α∗ = OPTc∗/OPT. +Lemma 3. For each color c ∈ C, |Mc| ≤ +β +α∗ · (OPTc∗/(4ℓ)) + 1. +Proof. 
Proof. Suppose for some c ∈ C, |Mc| > (β/α∗) · (OPTc∗/(4ℓ)) + 1. Then the number of rounds is strictly greater than (β/α∗) · (OPTc∗/(4ℓ)). Now, for any c′, OPTc′ ≥ α∗ · OPT and OPTc′ ≤ β · OPT. Thus, by the definition of α∗, (β/α∗) · OPTc∗ ≥ OPTc′. It follows that, for each c′, the number of rounds is strictly greater than OPTc′/(4ℓ). Hence, for each c′ ∈ C, more than (OPTc′/(4ℓ)) + 1 edges have been chosen. Thus, the total number of edges chosen is strictly larger than

    Σ_{c′∈C} ((OPTc′/(4ℓ)) + 1) ≥ ⌈OPT/(4ℓ)⌉.

This contradicts Lemma 1, which states that we select exactly ⌈OPT/(4ℓ)⌉ edges.

Corollary 2. For each color c ∈ C, |Mc|/|M| ≤ β · (1 + 4ℓ/OPT).

Proof. By Lemma 3,

    |Mc|/|M| ≤ ((β/α∗) · (OPTc∗/(4ℓ)) + 1)/⌈OPT/(4ℓ)⌉ ≤ ((β/α∗) · (OPTc∗/(4ℓ)) + (β/α∗))/(OPT/(4ℓ)) = (β/α∗) · (OPTc∗/OPT) · (1 + 4ℓ/OPT) = (β/α∗) · α∗ · (1 + 4ℓ/OPT) = β · (1 + 4ℓ/OPT).

The second inequality follows as α∗ ≤ β, i.e., β/α∗ ≥ 1.

Now let us remove the assumption that we know the size of an optimal solution. Note that ℓ ≤ OPT ≤ n. We probe all values between ℓ and n, and for each such value T we run our algorithm. For each matching M returned by the algorithm, we check whether M is (α/(1 + 4ℓ/T), β · (1 + 4ℓ/T))-balanced. If this is the case, then we keep this solution; otherwise, we discard it. Finally, we select a solution of the largest size among the ones not discarded. By the above analysis, with T = OPT, the matching returned satisfies the desired lower and upper bounds, and has size exactly ⌈OPT/(4ℓ)⌉. Finally, the running time of our algorithm is dominated by the 2^ℓ · n^{O(1)} time needed to compute a rainbow matching, as stated in Theorem 1.

Theorem 2. There is a 2^ℓ · n^{O(1)} time (1/(4ℓ), 1 + 4ℓ/OPT)-approximation algorithm for Proportionally Fair Matching with α > 0.

4 A Polynomial-time Approximation in the β-limited Case

In the β-limited case, we again make use of the Round procedure, but the algorithm is slightly different. Most importantly, we do not apply the algorithm of Theorem 1 in the beginning. Thus, our algorithm runs in polynomial time.

The Algorithm. Assume that we know the size of OPT. If OPT ≤ 2ℓ, we pick any edge and return it as the solution. Otherwise, we greedily pick matched edges in rounds using the Round procedure with the following two additional rules. If for some color at least β · OPT/(2ℓ) edges have already been chosen, do not choose any more edges of that color. If at least (OPT/(2ℓ)) − 1 edges have already been chosen in total, terminate.
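The following is a minimal sketch of the β-limited procedure just described, in the same illustrative conventions as before; opt_size plays the role of the assumed |OPT| and all identifiers are ours, not the paper's.

    def fair_matching_beta_limited(edges, colors, num_colors, opt_size, beta):
        # Greedy Rounds with a per-color cap of beta * opt_size / (2 * num_colors) edges
        # and a global stopping threshold of opt_size / (2 * num_colors) - 1 edges.
        if opt_size <= 2 * num_colors:
            return edges[:1]                      # any single edge
        cap = beta * opt_size / (2 * num_colors)
        threshold = opt_size / (2 * num_colors) - 1
        M = []
        picked_per_color = {c: 0 for c in range(1, num_colors + 1)}
        G = list(edges)
        while len(M) < threshold and G:
            progress = False
            for c in range(1, num_colors + 1):    # one Round, skipping capped colors
                if len(M) >= threshold:
                    break
                if picked_per_color[c] >= cap:    # rule 1: this color is at its cap
                    continue
                e = next((f for f in G if colors[f] == c), None)
                if e is None:
                    continue
                M.append(e)
                picked_per_color[c] += 1
                u, v = e
                G = [f for f in G if u not in f and v not in f]
                progress = True
            if not progress:                      # every color is exhausted or capped
                break
        return M

The assumption that |OPT| is known can presumably be removed as in Section 3, by probing candidate values of opt_size between ℓ and n; the analysis below is stated directly in terms of OPT.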
Now we analyze the algorithm. First note that if OPT ≤ 2ℓ, the returned matching has only one edge. The upper bound is trivially satisfied, and we also obtain a 2ℓ-approximation. Henceforth, we assume that OPT > 2ℓ. Before showing the correctness and analyzing the approximation factor, we show the upper bound for each color. Again let M be the computed matching and Mc = M ∩ Ec. Later we prove the following lemma.

Lemma 4. The algorithm always returns a matching of size at least (OPT/(2ℓ)) − 1.

Assuming this, we have the following observation.

Observation 3. For each color c ∈ C, |Mc|/|M| ≤ β · (1 + 2ℓ/OPT).

Proof. By Lemma 4 and the threshold put on each color in the algorithm,

    |Mc|/|M| ≤ (β · OPT/(2ℓ))/((OPT/(2ℓ)) − 1) ≤ β · (1 + 2ℓ/OPT).

The last inequality follows, as OPT > 2ℓ.

Next, we prove Lemma 4.

Proof. Let C1 be the subset of colors such that for each c ∈ C1, the algorithm picks at least β · OPT/(2ℓ) edges. Note that the algorithm can terminate in two ways: (i) it has already picked at least (OPT/(2ℓ)) − 1 edges, or (ii) all the edges have been exhausted. If (i) happens, then we are done. We prove that (ii) cannot happen without (i). Suppose (ii) happens, but not (i). Let OPT′ be the subset of OPT containing edges of colors in C′ = C \ C1. Recall that Gi−1 is the input graph to the i-th round and Gi is the output graph, for i ≥ 1. The number of edges chosen in the i-th round is at most ℓ. Hence, these edges can be adjacent to at most 2ℓ edges in Gi−1. In particular, at most 2ℓ fewer edges of OPT′ are contained in Gi compared to Gi−1. It follows that the algorithm can pick at least ⌊OPT′/(2ℓ)⌋ edges of colors in C′. As fewer than β · OPT/(2ℓ) edges are chosen from each color in C′, the algorithm indeed chooses at least ⌊OPT′/(2ℓ)⌋ edges of these colors. The total number of edges chosen by the algorithm is

    Σ_{c∈C1} |Mc| + Σ_{c∈C′} |Mc| ≥ Σ_{c∈C1} β · OPT/(2ℓ) + ⌊OPT′/(2ℓ)⌋ ≥ Σ_{c∈C1} OPTc/(2ℓ) + ⌊OPT′/(2ℓ)⌋ ≥ (OPT/(2ℓ)) − 1.

But this contradicts our assumption, and hence the lemma follows.

Theorem 3. There is a polynomial-time algorithm for Proportionally Fair Matching in the β-limited case that returns a matching of size at least (OPT/(2ℓ)) − 1 in which every color appears in at most a β · (1 + 2ℓ/OPT) fraction of the edges.

5 An Exact Algorithm for Proportionally Fair Matching

Theorem 4. There is a 2^{O(k)} · n^{O(1)}-time algorithm that either finds a solution of size k for a Proportionally Fair Matching instance, or determines that none exists.

Proof. We present two different algorithms using the well-known technique of color coding: one for the case α = 0 (the β-limited case), and one for the case α > 0.

β-limited case. We aim to reduce the problem to finding a rainbow matching of size k, which we then solve via Theorem 1. The graph G remains the same; however, the coloring is going to be different. Namely, for each of the original colors c ∈ C, we color the edges in Ec uniformly and independently at random from a set of k′ new colors, where k′ = ⌊βk⌋. Thus, the new instance I′ is colored in ℓ · k′ colors. We use the algorithm of Theorem 1 to find a rainbow matching of size k in the colored graph of I′. Clearly, if a rainbow matching M of size k is found, then the same matching M is a β-limited matching of size k in the original coloring. This holds since, by construction, for any original color c ∈ C there are k′ new colors in the edge set Ec, and therefore no more than k′ edges in M ∩ Ec.
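Before turning to the reverse direction, here is a minimal sketch of the recoloring step just described, under the same illustrative conventions as earlier (edge-keyed color dictionaries, a black-box rainbow-matching solver); it is an illustration under our own assumptions, not the paper's implementation, and it assumes ⌊βk⌋ ≥ 1.

    import random
    from math import floor

    def recolor_beta_limited(edges, colors, k, beta):
        # Split every original color class E_c uniformly at random into
        # k' = floor(beta * k) fresh colors; distinct classes get disjoint
        # ranges of fresh colors, so l * k' colors are used in total.
        k_prime = floor(beta * k)
        return {e: (colors[e] - 1) * k_prime + random.randrange(k_prime) for e in edges}

    def exact_beta_limited(edges, colors, k, beta, rainbow_matching, repetitions):
        # Repeat the recoloring; a beta-limited matching of size k survives a single
        # trial as a rainbow matching with probability at least 2^(-delta * k), so
        # 2^{O(k)} repetitions give constant success probability.
        for _ in range(repetitions):
            new_colors = recolor_beta_limited(edges, colors, k, beta)
            M = rainbow_matching(edges, new_colors, k)   # black-box Theorem 1 solver
            if M is not None and len(M) == k:
                return M
        return None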
In the other direction, we show that if there exists a β-limited matching M of size k with respect to the original coloring, then with good probability M is a rainbow matching of size k in the new coloring. Assume that the original colors c1, . . . , ct, for some 1 ≤ t ≤ ℓ, are the ones having non-empty intersection with M, and for each j ∈ [t] denote kj = |M ∩ Ecj|. Observe that Σ_{j=1}^{t} kj = k and, for each j ∈ [t], 1 ≤ kj ≤ k′.

Claim 1. There exists some δ > 0 such that for each j ∈ [t]:

    Pr[M ∩ (∪_{i=1}^{j} Eci) is a rainbow matching in I′] ≥ exp(−δ · Σ_{i=1}^{j} ki).    (1)

Proof. We prove the claim by induction on j. For the base case, clearly (1) holds for j = 0. Now, fix j ∈ [t] and assume the statement holds for each j′ < j; we show that (1) also holds for j. Consider the kj edges of M ∩ Ecj; they are colored uniformly and independently in k′ ≥ kj colors. By counting possible colorings of M ∩ Ecj, it follows that

    Pr[M ∩ Ecj is a rainbow matching] ≥ (k′)!/((k′ − kj)! · (k′)^{kj}) ≥ kj!/(kj)^{kj} ≥ 2^{−δ·kj},

where the last bound is by Stirling's formula. Now, since the colors used for Ecj do not appear anywhere else, using the inductive hypothesis we get

    Pr[M ∩ (∪_{i=1}^{j} Eci) is a rainbow matching] = Pr[M ∩ (∪_{i=1}^{j−1} Eci) is a rainbow matching] · Pr[M ∩ Ecj is a rainbow matching] ≥ 2^{−δ·Σ_{i=1}^{j−1} ki} · 2^{−δ·kj} = 2^{−δ·Σ_{i=1}^{j} ki}.

Applying (1) with j = t, we obtain that M is a rainbow matching with probability at least 2^{−δk}. By repeating the reduction above 2^{O(k)} times independently, we achieve that the algorithm succeeds with constant probability.

The case α > 0. We observe that in this case, if a matching is fair, it necessarily contains at least one edge from each of the groups. Thus, if the number of groups ℓ is greater than k, we immediately conclude that there cannot be a fair matching of size k. Otherwise, we guess how the desired k edges are partitioned between the ℓ groups C = {c1, . . . , cℓ}. That is, we guess the numbers kj for j ∈ [ℓ] such that Σ_{j=1}^{ℓ} kj = k and αk ≤ kj ≤ βk for each j ∈ [ℓ]. From now on, the algorithm is very similar to the β-limited case. For each group cj, we color the edges of Ecj from a set of kj colors uniformly and independently at random, where the colors used for each Ecj are non-overlapping. Now we use the algorithm of Theorem 1 to find a rainbow matching of size k. If there is a rainbow matching M of size k, the same matching is a fair matching of size k for the original instance, since in each Ecj exactly kj edges are chosen, which is at least αk and at most βk. In the other direction, if there is a fair matching M of size k in the original instance, then by (1) the matching M is a rainbow matching in the new instance with probability at least 2^{−δk}. Again, by repeating the coloring subprocess independently 2^{O(k)} times, we achieve a constant probability of success. Since there are 2^{O(k)} options for partitioning k edges into at most ℓ ≤ k groups, the running time of the whole algorithm is 2^{O(k)} · n^{O(1)}.

Finally, we note that the coloring part in both cases can be derandomized in the standard fashion by using perfect hash families [42], leading to a completely deterministic algorithm.
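For the α > 0 case above, the extra ingredient is enumerating the possible splits of the k edges among the ℓ groups. The sketch below is our own illustrative code, not the paper's; each generated split would then be handled by recoloring group j with kj fresh colors and invoking the black-box rainbow-matching solver, as in the β-limited sketch earlier.

    from math import ceil, floor

    def feasible_splits(k, num_groups, alpha, beta):
        # Enumerate all tuples (k_1, ..., k_l) with sum k_j = k and
        # alpha * k <= k_j <= beta * k. When alpha > 0 and num_groups > k,
        # nothing is yielded, matching the observation that no fair matching
        # of size k can exist in that case.
        lo, hi = ceil(alpha * k), floor(beta * k)
        if num_groups * lo > k or num_groups * hi < k:
            return
        def rec(prefix, remaining, groups_left):
            if groups_left == 0:
                if remaining == 0:
                    yield tuple(prefix)
                return
            for kj in range(lo, min(hi, remaining) + 1):
                yield from rec(prefix + [kj], remaining - kj, groups_left - 1)
        yield from rec([], k, num_groups)

The number of such splits is 2^{O(k)} when ℓ ≤ k, which is where the 2^{O(k)} factor in the running time of Theorem 4 comes from.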
6 Hardness of Approximation for Proportionally Fair Matching

In this section, we show an inapproximability result for Proportionally Fair Matching under the Exponential Time Hypothesis (ETH) [33]. ETH states that 2^{Ω(n)} time is needed to solve any generic 3SAT instance with n variables. For our purpose, we need the following restricted version of 3SAT.

3SAT-3
INPUT: A set of clauses T = {C1, . . . , Cm} in variables x1, . . . , xn, each clause being the disjunction of 3 or 2 literals, where a literal is a variable xi or its negation ¯xi. Additionally, each variable appears 3 times.
QUESTION: Is there a truth assignment that simultaneously satisfies all the clauses?

3SAT-3 is known to be NP-hard [48]. We need the following stronger lower bound for 3SAT-3, proved in [20].

Proposition 1 ([20]). Under ETH, 3SAT-3 cannot be solved in 2^{o(n)} time.

We reduce 3SAT-3 to Proportionally Fair Matching in a way that rules out any approximation for the latter problem in 2^{o(ℓ)} · n^{O(1)} time. Our reduction is as follows. For each clause Ci, we have a color i. Also, we have n − 1 additional colors m + 1, . . . , m + n − 1. Thus, the set of colors is C = {1, . . . , m + n − 1}. For each variable xi, we construct a gadget, which is a 3-path (a path with 3 edges). Note that xi either appears twice in its normal form or twice in its negated form, as it appears 3 times in total. Let Ci1, Ci2 and Ci3 be the clauses where xi appears. Also, suppose it appears in Ci1 and Ci3 in one form, and in Ci2 in the other form. We construct a 3-path Pi for xi where the j-th edge has color ij, for 1 ≤ j ≤ 3. Additionally, we construct n − 1 3-paths Qi,i+1 for 1 ≤ i ≤ n − 1. All edges of Qi,i+1 are of color m + i. Finally, we glue together all the paths in the following way to obtain a single path. For each 1 ≤ i ≤ n − 1, we glue Qi,i+1 in between Pi and Pi+1 by identifying the last vertex of Pi with the first vertex of Qi,i+1 and the last vertex of Qi,i+1 with the first vertex of Pi+1. Thus we obtain a path P with exactly 3(n + n − 1) = 6n − 3 edges. Finally, we set α = β = 1/(m + n − 1).

Lemma 5. There is a satisfying assignment for the clauses in 3SAT-3 if and only if there is an (α, β)-balanced matching of size at least m + n − 1.

Proof. Suppose there is a satisfying assignment for all the clauses. For each clause Cj, consider a variable, say xi, that satisfies Cj. Then there is an edge of color j on Pi. Add this edge to a set M. Thus, after this step, M contains exactly one edge of color j for 1 ≤ j ≤ m. Also, note that for each path Pi, if the middle edge is chosen, then no other edge from Pi can be chosen. This is true, as the variable xi can either satisfy the clauses where it appears in its normal form or the clauses where it appears in its negated form, but not both types of clauses. Hence, M is a matching. Finally, for each path Qi,i+1, we add its middle edge to M. Note that M still remains a matching. Moreover, M contains exactly one edge of color j for 1 ≤ j ≤ m + n − 1. As α = β = 1/(m + n − 1), M is an (α, β)-balanced matching.

Now suppose there is an (α, β)-balanced matching M of size at least m + n − 1. First, we show that |M| = m + n − 1. Note that if |M| > m + n − 1, then the only possibility is that |M| = 2(m + n − 1), as α = β and at most 2 edges of color j can be picked in any matching for m + 1 ≤ j ≤ m + n − 1. Suppose |M| = 2(m + n − 1). Then from each Qi,i+1, M contains the first and the third edge. This implies that from each Pt, 1 ≤ t ≤ n, we can pick at most one edge. Thus, the total number of edges in M is at most 2(n − 1) + n. It follows that 2m + 2n − 2 ≤ 2n − 2 + n, or n ≥ 2m. Now, in 3SAT-3 the total number of literals is 3n and at most 3m, as each variable appears 3 times and each clause contains at most 3 literals. This implies n ≤ m, and we obtain a contradiction. Thus, |M| = m + n − 1, and hence M contains exactly one edge of every color. Now, consider any Pi. In the first case, when xi appears in Ci1 and Ci3 in its normal form, the first and third edges of Pi correspond to the literal xi and, hence, the middle edge corresponds to the literal ¯xi. If the middle edge is in M, assign 0 to xi; otherwise, assign 1 to xi. In the other case, if the middle edge is in M, assign 1 to xi; otherwise, assign 0 to xi. We claim that the constructed assignment satisfies all the clauses. Consider any clause Cj. Let e ∈ Pi be the edge in M of color j, for 1 ≤ j ≤ m. Note that e may or may not be the middle edge of Pi. In any case, if e corresponds to ¯xi, we assigned 0 to xi, and if e corresponds to xi, we assigned 1 to xi. Thus, in either case, Cj is satisfied. This completes the proof of the lemma.

Note that for a 3SAT-3 instance the total number of literals is 3n. As each clause contains at least 2 literals, m ≤ 3n/2.
Now, for the instances constructed in the above proof, the number of +colors ℓ = m + n − 1 ≤ 3n/2 + n − 1 = 5n/2 − 1. Thus, the above lemma along with Proposition 1 +show that it is not possible to decide whether there is an (α, β)-balanced matching of a given size +in time 2o(ℓ)nO(1). Using this, we also show that even no 2o(ℓ)nO(1) time approximation algorithm +is possible. +Suppose there is a 2o(ℓ)nO(1) time γ-approximation algorithm, where γ < 1. +For +our constructed path instances, we apply this algorithm to obtain a matching. +Note that the +γ-approximate solution M must contain at least one edge of every color, as α = β. By the proof in +the above lemma, |M| is exactly m+n−1. Hence, using this algorithm, we can decide in 2o(ℓ)nO(1) +time whether there is an (α, β)-balanced matching of size m + n − 1. But, this is a contradiction, +which leads to the following theorem. +Theorem 5. For any γ > 1, under ETH, there is no 2o(ℓ)nO(1) time γ-approximation algorithm +for Proportionally Fair Matching, even on paths. +7 +Conclusions +In this paper, we study the notion of proportional fairness in the context of matchings in graphs, +which has been studied by Chierichetti et al. [15]. We obtained approximation and exact algo- +rithms for the proportionally fair matching problem. We also complement these results by showing +hardness results. It would be interesting to obtain a o(ℓ)- or a true O(ℓ)-approximation for Pro- +portionally Fair Matching improving our result. As evident from our hardness result, there +is a lower bound of 2Ω(ℓ)nO(1) on the running time of such an algorithm. +Acknowledgments. +Most of this work was done when all four authors were affiliated with +University of Bergen, Norway. The research leading to these results has received funding from +the Research Council of Norway via the project MULTIVAL, and the European Research Council +(ERC) via grant LOPPRE, reference 819416. +10 + +References +[1] A. Agarwal, M. Dudik, and Z. S. Wu, Fair regression: Quantitative definitions and +reduction-based algorithms, in International Conference on Machine Learning, PMLR, 2019, +pp. 120–129. 1 +[2] S. Ahmadi, F. Ahmed, J. P. Dickerson, M. Fuge, and S. Khuller, An algorithm +for multi-attribute diverse matching, in Proceedings of the Twenty-Ninth International Joint +Conference on Artificial Intelligence, IJCAI 2020, C. Bessiere, ed., ijcai.org, 2020, pp. 3–9. 1 +[3] N. Alon, R. Yuster, and U. Zwick, Color-coding, Journal of the ACM (JACM), 42 +(1995), pp. 844–856. 3 +[4] J. Angwin, J. Larson, S. Mattu, and L. Kirchner, Machine bias: There’s software +used across the country to predict future criminals. and it’s biased against blacks, ProPublica, +(May 23, 2016). 1 +[5] S. Bandyapadhyay, F. V. Fomin, and K. Simonov, On coresets for fair clustering in +metric and euclidean spaces and their applications, CoRR, abs/2007.10137 (2020). 1 +[6] X. Bei, S. Liu, C. K. Poon, and H. Wang, Candidate selections with proportional fair- +ness constraints, in Proceedings of the 19th International Conference on Autonomous Agents +and Multiagent Systems, AAMAS ’20, Auckland, New Zealand, May 9-13, 2020, A. E. F. +Seghrouchni, G. Sukthankar, B. An, and N. Yorke-Smith, eds., International Foundation for +Autonomous Agents and Multiagent Systems, 2020, pp. 150–158. 3 +[7] S. Bera, D. Chakrabarty, N. Flores, and M. Negahbani, Fair algorithms for cluster- +ing, in Advances in Neural Information Processing Systems, 2019, pp. 4954–4965. 1 +[8] A. Berger, V. Bonifaci, F. Grandoni, and G. 
Sch¨afer, Budgeted matching and bud- +geted matroid intersection via the gasoline puzzle, Math. Program., 128 (2011), pp. 355–372. +3 +[9] R. Berk, H. Heidari, S. Jabbari, M. Joseph, M. J. Kearns, J. Morgenstern, +S. Neel, and A. Roth, A convex framework for fair regression, CoRR, abs/1706.02409 +(2017). 1 +[10] J. Buolamwini and T. Gebru, Gender shades: Intersectional accuracy disparities in com- +mercial gender classification, in Conference on Fairness, Accountability and Transparency, +FAT 2018, 23-24 February 2018, New York, NY, USA, S. A. Friedler and C. Wilson, eds., +vol. 81 of Proceedings of Machine Learning Research, PMLR, 2018, pp. 77–91. 1 +[11] T. Calders and S. Verwer, Three naive bayes approaches for discrimination-free classifi- +cation, Data Mining and Knowledge Discovery, 21 (2010), pp. 277–292. 1 +[12] L. E. Celis, L. Huang, and N. K. Vishnoi, Multiwinner voting with fairness constraints, +in Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, +IJCAI 2018, July 13-19, 2018, Stockholm, Sweden, J. Lang, ed., ijcai.org, 2018, pp. 144–151. +1 +[13] L. E. Celis, D. Straszak, and N. K. Vishnoi, Ranking with fairness constraints, in +45th International Colloquium on Automata, Languages, and Programming, ICALP 2018, +July 9-13, 2018, Prague, Czech Republic, I. Chatzigiannakis, C. Kaklamanis, D. Marx, and +D. Sannella, eds., vol. 107 of LIPIcs, Schloss Dagstuhl - Leibniz-Zentrum f¨ur Informatik, 2018, +pp. 28:1–28:15. 1 +11 + +[14] L. Charlin and R. Zemel, The toronto paper matching system: an automated paper- +reviewer assignment system, (2013). 1 +[15] F. Chierichetti, R. Kumar, S. Lattanzi, and S. Vassilvitskii, Fair clustering through +fairlets, in Advances in Neural Information Processing Systems, 2017, pp. 5029–5037. 1, 10 +[16] F. Chierichetti, R. Kumar, S. Lattanzi, and S. Vassilvitskii, Matroids, matchings, +and fairness, in The 22nd International Conference on Artificial Intelligence and Statistics, +AISTATS 2019, 16-18 April 2019, Naha, Okinawa, Japan, K. Chaudhuri and M. Sugiyama, +eds., vol. 89 of Proceedings of Machine Learning Research, PMLR, 2019, pp. 2212–2220. 1, 2 +[17] A. Chouldechova, Fair prediction with disparate impact: A study of bias in recidivism +prediction instruments, Big data, 5 (2017), pp. 153–163. 3 +[18] S. Corbett-Davies, E. Pierson, A. Feller, S. Goel, and A. Huq, Algorithmic decision +making and the cost of fairness, in Proceedings of the 23rd ACM SIGKDD International +Conference on Knowledge Discovery and Data Mining, Halifax, NS, Canada, August 13 - 17, +2017, ACM, 2017, pp. 797–806. 1, 3 +[19] C. S. Crowson, E. J. Atkinson, and T. M. Therneau, Assessing calibration of prognostic +risk scores, Statistical methods in medical research, 25 (2016), pp. 1692–1706. 1 +[20] M. Cygan, D. Marx, M. Pilipczuk, and M. Pilipczuk, Hitting forbidden subgraphs in +graphs of bounded treewidth, Information and Computation, 256 (2017), pp. 62–82. 9 +[21] A. Datta, M. C. Tschantz, and A. Datta, Automated experiments on ad privacy settings: +A tale of opacity, choice, and discrimination, Proceedings on privacy enhancing technologies, +2015 (2015), pp. 92–112. 1 +[22] C. Dwork, M. Hardt, T. Pitassi, O. Reingold, and R. Zemel, Fairness through +awareness, in Proceedings of the 3rd innovations in theoretical computer science conference, +2012, pp. 214–226. 1, 3 +[23] C. Dwork and C. 
Ilvento, Group fairness under composition, in Proceedings of the 2018 +Conference on Fairness, Accountability, and Transparency (FAT* 2018), 2018. 3 +[24] S. Ebadian, A. Kahng, D. Peters, and N. Shah, Optimized distortion and proportional +fairness in voting, in Proceedings of the 23rd ACM Conference on Economics and Computa- +tion, 2022, pp. 563–600. 3 +[25] M. Feldman, S. A. Friedler, J. Moeller, C. Scheidegger, and S. Venkatasubra- +manian, Certifying and removing disparate impact, in proceedings of the 21th ACM SIGKDD +international conference on knowledge discovery and data mining, 2015, pp. 259–268. 1, 2, 3 +[26] R. Freeman, E. Micha, and N. Shah, Two-sided matching meets fair division, (2020). 1 +[27] H. N. Garb, Race bias, social class bias, and gender bias in clinical judgment, Clinical +Psychology: Science and Practice, 4 (1997), pp. 99–120. 1 +[28] D. Garc´ıa-Soriano and F. Bonchi, Fair-by-design matching, Data Min. Knowl. Discov., +34 (2020), pp. 1291–1335. 1, 3 +[29] N. Goel, M. Yaghini, and B. Faltings, Non-discriminatory machine learning through +convex fairness criteria, in Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, +and Society, AIES 2018, New Orleans, LA, USA, February 02-03, 2018, J. Furman, G. E. +Marchant, H. Price, and F. Rossi, eds., ACM, 2018, p. 116. 3 +12 + +[30] S. Gupta, S. Roy, S. Saurabh, and M. Zehavi, Parameterized algorithms and kernels for +rainbow matching, Algorithmica, 81 (2019), pp. 1684–1698. 3, 4 +[31] C. Huang, T. Kavitha, K. Mehlhorn, and D. Michail, Fair matchings and related +problems, Algorithmica, 74 (2016), pp. 1184–1203. 1, 3 +[32] L. Huang, S. Jiang, and N. Vishnoi, Coresets for clustering with fairness constraints, in +Advances in Neural Information Processing Systems, 2019, pp. 7589–7600. 1 +[33] R. Impagliazzo and R. Paturi, On the complexity of k-sat, Journal of Computer and +System Sciences, 62 (2001), pp. 367–375. 2, 9 +[34] M. Joseph, M. J. Kearns, J. Morgenstern, and A. Roth, Fairness in learning: Classic +and contextual bandits, in Advances in Neural Information Processing Systems 29: Annual +Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, +Spain, D. D. Lee, M. Sugiyama, U. von Luxburg, I. Guyon, and R. Garnett, eds., 2016, +pp. 325–333. 1 +[35] Y. Kamada and F. Kojima, Fair matching under constraints: Theory and applications, +(2020). 1, 3 +[36] T. Kamishima, S. Akaho, and J. Sakuma, Fairness-aware learning through regularization +approach, in Data Mining Workshops (ICDMW), 2011 IEEE 11th International Conference +on, Vancouver, BC, Canada, December 11, 2011, M. Spiliopoulou, H. Wang, D. J. Cook, +J. Pei, W. Wang, O. R. Za¨ıane, and X. Wu, eds., IEEE Computer Society, 2011, pp. 643–650. +3 +[37] D. Kesavan, E. Periyathambi, and A. Chokkalingam, A proportional fair scheduling +strategy using multiobjective gradient-based african buffalo optimization algorithm for effective +resource allocation and interference minimization, International Journal of Communication +Systems, 35 (2022), p. e5003. 3 +[38] B. Klaus and F. Klijn, Procedurally fair and stable matching, Economic Theory, 27 (2006), +pp. 431–447. 1, 3 +[39] J. Kleinberg, S. Mullainathan, and M. Raghavan, Inherent trade-offs in the fair de- +termination of risk scores, in 8th Innovations in Theoretical Computer Science Conference +(ITCS 2017), Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik, 2017. 1, 3 +[40] R. Kurata, N. Hamada, A. Iwasaki, and M. 
Yokoo, Controlled school choice with soft +bounds and overlapping types, Journal of Artificial Intelligence Research, 58 (2017), pp. 153– +184. 1 +[41] Y. Lu, The optimization of automated container terminal scheduling based on proportional +fair priority, Mathematical Problems in Engineering, 2022 (2022). 3 +[42] M. Naor, L. J. Schulman, and A. Srinivasan, Splitters and near-optimal derandom- +ization, in Proceedings of the 36th Annual Symposium on Foundations of Computer Science +(FOCS 1995), IEEE, 1995, pp. 182–191. 9 +[43] M. H. Nguyen, M. Baiou, V. H. Nguyen, and T. Q. T. Vo, Nash fairness solutions for +balanced tsp, in 10th International Network Optimization Conference (INOC), 2022. 3 +[44] P. Ristoski, P. Petrovski, P. Mika, and H. Paulheim, A machine learning approach +for product matching and categorization, Semantic web, 9 (2018), pp. 707–728. 1 +13 + +[45] W. St-Arnaud, M. Carvalho, and G. Farnadi, Adaptation, comparison and prac- +tical implementation of fairness schemes in kidney exchange programs, arXiv preprint +arXiv:2207.00241, (2022). 3 +[46] G. Stamoulis, Approximation algorithms for bounded color matchings via convex decompo- +sitions, in Mathematical Foundations of Computer Science 2014 - 39th International Sympo- +sium, MFCS 2014, Budapest, Hungary, August 25-29, 2014. Proceedings, Part II, E. Csuhaj- +Varj´u, M. Dietzfelbinger, and Z. ´Esik, eds., vol. 8635 of Lecture Notes in Computer Science, +Springer, 2014, pp. 625–636. 3 +[47] B. L. Thanh, S. Ruggieri, and F. Turini, k-nn as an implementation of situation testing +for discrimination discovery and prevention, in Proceedings of the 17th ACM SIGKDD Inter- +national Conference on Knowledge Discovery and Data Mining, San Diego, CA, USA, August +21-24, 2011, C. Apt´e, J. Ghosh, and P. Smyth, eds., ACM, 2011, pp. 502–510. 3 +[48] M. Yannakakis, Node- and edge-deletion np-complete problems, in Proceedings of the 10th +Annual ACM Symposium on Theory of Computing, May 1-3, 1978, San Diego, California, +USA, R. J. Lipton, W. A. Burkhard, W. J. Savitch, E. P. Friedman, and A. V. Aho, eds., +ACM, 1978, pp. 253–264. 9 +[49] G. Zhang, S. Malekmohammadi, X. Chen, and Y. Yu, Equality is not equity: Propor- +tional fairness in federated learning, arXiv preprint arXiv:2202.01666, (2022). 3 +14 + diff --git a/ZNE2T4oBgHgl3EQfZQdS/content/tmp_files/load_file.txt b/ZNE2T4oBgHgl3EQfZQdS/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..dcac8580bb85b4beaa568faa03416b34edd1862f --- /dev/null +++ b/ZNE2T4oBgHgl3EQfZQdS/content/tmp_files/load_file.txt @@ -0,0 +1,791 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf,len=790 +page_content='Proportionally Fair Matching with Multiple Groups Sayan Bandyapadhyay ∗ Fedor V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Fomin†.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Tanmay Inamdar‡ Kirill Simonov§ Abstract The study of fair algorithms has become mainstream in machine learning and artificial intelligence due to its increasing demand in dealing with biases and discrimination.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Along this line, researchers have considered fair versions of traditional optimization problems including clustering, regression, ranking and voting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' However, most of the efforts have been channeled into designing heuristic algorithms, which often do not provide any guarantees on the quality of the solution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In this work, we study matching problems with the notion of proportional fairness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Proportional fairness is one of the most popular notions of group fairness where every group is represented up to an extent proportional to the final selection size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Matching with proportional fairness or more commonly, proportionally fair matching, was introduced in [Chierichetti et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', AISTATS, 2019], where the problem was studied with only two groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' However, in many practical applications, the number of groups—although often a small constant—is larger than two.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In this work, we make the first step towards understanding the computational complexity of proportionally fair matching with more than two groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' We design exact and approximation algorithms achieving reasonable guarantees on the quality of the matching as well as on the time complexity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Our algorithms are also supported by suitable hardness bounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 Introduction Machine learning (ML) algorithms are ubiquitous in today’s world, constantly playing crucial roles in decision-making which has an immeasurable impact on human lives.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' These algorithms trained on past instances are extremely powerful and most of the time output correct solutions without making any error.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' However, in recent times, these algorithms have faced critiques for being biased towards underrepresented groups [4, 21, 27, 10].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Consequently, researchers have made efforts in understanding how biases are introduced in the ML pipeline and whether it is possible to get rid of them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' This research has given rise to an entire subfield called fairness in ML.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' All the work done so far in this budding subfield can broadly be classified into two types.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The first one studies different notions of fairness and their interactions [11, 22, 25, 19, 39, 18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' These works essentially show that there is no universal definition of fairness that captures all the scenarios and it is not possible to satisfy different fairness notions simultaneously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In the second type of works, researchers have studied fair versions of classical problems incorporating suitable notions of fairness from the first type.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Notably the problems considered include clustering [15, 32, 7, 5], regression [34, 9, 1], ranking [13], voting [12] and matching [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In this paper, we consider the proportionally fair matching problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Matching appears natu- rally in several applications in ML, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', assigning products to customers [44];' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' students to schools [40];' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' reviewers to manuscripts [14];' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' and workers to firms [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' There are scores of works that study fair versions of matchings [16, 31, 28, 35, 38, 26].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Among these distinct notions of matchings, our work is most relevant to (α, β)-balanced matching [16].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' (α, β)-balanced matching was formulated ∗Portland State University, USA †University of Bergen, Norway ‡University of Bergen, Norway §Technische Universit¨at Wien, Austria 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='03862v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='DS] 10 Jan 2023 by [16] by bringing proportional fairness and maximum cardinality matching together.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Propor- tional fairness is based on the concept of disparate impact [25], which in the context of matching is defined as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' A matching is (α, β)-balanced or proportionally fair if the ratio between the number of edges from each group and the size of the matching is at least α and at most β.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' As a motivating example of proportionally fair matching, consider the product recommendation problem in e-commerce.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' With the advancement of digital marketing and advertising, nowadays companies are interested in more fine-tuned approaches that help them reach the target groups of customers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' These groups may be representative of certain underlying demographic categorizations into based on gender, age group, geographic location etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Thus, the number of groups is often a small constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In particular, in this contemporary setting, one is interested in finding assignments that involve customers from all target groups and have a balanced impact on all these groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' This assignment problem can be modeled as the proportionally fair matching problem between customers and products.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In a realistic situation, one might need to assign many products to a customer and many customers to a product.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' This can be achieved by computing multiple matchings in an iterative manner while removing the edges from the input graph that are already matched.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In a seminal work, [16] obtained a polynomial-time 3/2-approximation when the number of groups is 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' However, in many real-world situations, like in the above example, it is natural to assume that the number of target groups is more than 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Unfortunately, the algorithm of [16] strongly exploits the fact that the number of groups ℓ = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' It is not clear how to adapt or extend their algorithm when we have more than two groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The only known algorithm prior to our work for ℓ > 2 groups was an nO(ℓ)-time randomized exact algorithm [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The running time of this algorithm has a “bad” exponential dependence on the number of groups, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', the running time is not a fixed polynomial in n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Thus, this algorithm quickly becomes impractical if ℓ grows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Our research on proportionally fair matching is driven by the following question.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Do there exist efficient algorithms with guaranteed performance for proportionally fair matching when the number of groups ℓ is more than two?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='1 Our results and contributions In this work, we obtain several results on the Proportionally Fair Matching problem with any arbitrary ℓ number of groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' First, we show that the problem is extremely hard for any general ℓ number of groups, in the sense that it is not possible to obtain any approximation algorithm in 2o(ℓ)nO(1) time even on path graphs, unless the Exponential Time Hypothesis (ETH) [33] is false.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' To complement our hardness result, we design a 1/4ℓ-approximation algorithm that runs in 2O(ℓ)nO(1) time.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Our algorithm might violate the lower (α) and upper (β) bounds by at most a multiplicative factor of (1 + 4ℓ/|OPT|) if |OPT| is more than 4ℓ2, where OPT is any optimum solution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Thus, the violation factor is at most 1 + 1/ℓ, and tends to 1 with asymptotic values of |OPT|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' We also consider a restricted case of the problem, referred to as the β-limited case in [16], where we only have the upper bound, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', no edges might be present from some groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In this case, we could improve the approximation factor to 1/2ℓ and running time to polynomial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Lastly, we show that the parameterized version of the problem where one seeks for a propor- tionally fair matching of size k, can be solved exactly in 2O(k)nO(1) time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Thus the problem is fixed-parameter tractable parameterized by k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' All of our algorithms are based on simple schemes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Our approximation algorithms use an iterative peeling scheme that in each iteration, extracts a rainbow matching containing at most one edge 2 from every group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The exact algorithm is based on a non-trivial application of the celebrated color- coding scheme [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' These algorithms appear in Sections 3, 4, and 5, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The hardness proof is given in Section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='2 Related work In recent years, researchers have introduced and studied several different notions of fairness, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='g.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', disparate impact [25], statistical parity [47, 36], individual fairness [22] and group fairness [23].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kleinberg et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' [39] formulated three notions of fairness and showed that it is theoretically impos- sible to satisfy them simultaneously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' See also [18, 17] for similar exposures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The notion of proportional fairness with multiple protected groups is widely studied in the literature, which is based on disparate impact [25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Bei et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' [6] studied the proportional candidate selection problem, where the goal is to select a subset of candidates with various attributes from a given set while satisfying certain proportional fairness constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Goel et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' [29] considered the problem of learning non-discriminatory and proportionally fair classifiers and proposed the weighted sum of logs technique.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Proportional fairness has also been considered in the context of Federated learning [49].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Additionally, proportional fairness has been studied in the context of numerous optimization problems including voting [24], scheduling [37, 41], Kidney exchange [45], and Traveling Salesman Problem [43].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Several different fair matching problems have been studied in the literature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' [31] studied fair b-matching, where matching preferences for each vertex are given as ranks, and the goal is to avoid assigning vertices to high ranked preferences as much as possible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Fair-by-design-matching is studied in [28], where instead of a single matching, a probability distribution over all feasible matchings is computed which guarantees individual fairness.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' See also [35, 38].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Apart from the fair versions of matchings, many constrained versions are also studied [46, 8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' [46] studied the Bounded Color Matching (BCM) problem where edges are colored and from each color class, only a given number of edges can be chosen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' BCM is a special case of 3-set packing and, hence, admits a 3/4-approximation [46].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' We note that the β-limited case of Proportionally Fair Matching is a special case of BCM and, thus, a 3/4-approximation follows in this case where the upper bound might be violated by 3/4 factor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' One should compare this factor with our violation factor, which asymptotically tends to 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 2 Preliminaries For an integer ℓ ≥ 1, let [ℓ] := {1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' , ℓ}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Consider any undirected n-vertex graph G = (V, E) such that the edges in E are colored by colors in C = {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' , ℓ}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The function χ : E → C describes the color assignment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' For each color c ∈ C, let Ec be the set of edges colored by the color c, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', Ec = χ−1(c).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' A subset E′ ⊆ E is a matching in G if no two edges in E′ share a common vertex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Definition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' (α, β)-balanced matching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Given 0 ≤ α ≤ β ≤ 1, a matching M ⊆ E is called (α, β)-balanced if for each color c ∈ C, we have that α ≤ |M ∩ Ec| |M| ≤ β.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Thus a matching is (α, β)-balanced if it contains at least α and at most β fraction of edges from every color.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In the Proportionally Fair Matching problem, the goal is to find a maximum- sized (α, β)-balanced matching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In the restricted β-limited case of the problem, α = 0, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', we only have the upper bound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' For γ ≤ 1 and ∆ ≥ 1, a (γ, ∆)-approximation algorithm for Proportionally Fair Matching computes a matching of size at least γ · |OPT|, where every color appears in at least α/∆ fraction of the edges and in at most β · ∆ fraction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' OPT is an optimum (α, β)-balanced matching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' A matching is called a rainbow matching if all of its edges have distinct colors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' We will need the following result due to Gupta et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' [30].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' For some integer k > 0, suppose there is a rainbow matching in G of size k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' There is a 2k · nO(1) time algorithm that computes a rainbow matching of size k.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 A ( 1 4ℓ, 1+ 4ℓ |OPT|)-Approximation for Proportionally Fair Matching In this section, we design an approximation algorithm for Proportionally Fair Matching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Let OPT be an optimum (α, β)-balanced matching, OPTc = OPT∩Ec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' We design two algorithms: one for the case when α > 0 and the other for the complementary β-limited case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In this section, we slightly abuse the notation, and use OPT (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' OPTc for some color c ∈ C) to refer to |OPT| (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' |OPTc|).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The intended meaning should be clear from the context;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' however we will disambiguate in case there is a possibility of confusion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' First, we consider the α > 0 case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Immediately, we have the following observation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Observation 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' For any color c ∈ C, OPT contains at least one edge of color c and, hence, G contains a rainbow matching of size ℓ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Our algorithm runs in rounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' In the following, we define a round.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' The input in each round is a subgraph G′ = (V ′, E′) of G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Round.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Initially M = ∅.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' For every color 1 ≤ c ≤ ℓ, do the following in an iterative manner.' 
Next, we describe our algorithm. The most challenging part is to ensure that the final matching computed is (α, β)-balanced modulo a small factor, i.e., we need to ensure both the lower and the upper bounds within a small factor for each color. Note that the above greedy way of picking edges alone might not even ensure that at least one edge is selected from each color. We use the algorithm of [30] in the beginning to overcome this barrier. However, the rest of our algorithm is extremely simple.

The Algorithm. We assume that we know the size of OPT; we describe later how to remove this assumption. Apply the algorithm in Theorem 1 on G to compute a rainbow matching M′ of size ℓ. If OPT ≤ 4ℓ², return M := M′ as the solution and terminate. Otherwise, remove all the edges of M′ and the edges adjacent to them from G to obtain the graph G_0. Initialize M to M′. Greedily pick matched edges in rounds using the Round procedure and add them to M until exactly ⌈OPT/(4ℓ)⌉ edges are picked in total. In particular, the graph G_0 is the input to the 1st round and G_1 is the output graph of the 1st round; G_1 is the input to the 2nd round and G_2 is the output graph of the 2nd round, and so on. Note that the last round might not be completed fully if the size of M reaches ⌈OPT/(4ℓ)⌉ before the completion of the round.

Note that the above algorithm is oblivious to α and β in the sense that it never uses these values. Nevertheless, we prove that the computed matching is (α, β)-balanced modulo a small factor.
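A compact sketch of this algorithm is given below. It reuses round_pick from above and assumes a hypothetical black-box routine rainbow_matching(edges, edge_color, size) standing in for the 2^ℓ · n^{O(1)} algorithm of Theorem 1 and returning a list of edges; neither that routine nor the exact data types are prescribed by the paper.

    import math

    def fair_matching_alpha_positive(edges, edge_color, num_colors, opt_size):
        """Sketch of the (1/(4l), 1 + 4l/OPT)-approximation for alpha > 0,
        assuming opt_size = |OPT| is known."""
        l = num_colors
        M = list(rainbow_matching(edges, edge_color, size=l))  # Theorem 1 (assumed)
        if opt_size <= 4 * l * l:
            return M
        target = math.ceil(opt_size / (4 * l))
        used = set().union(*M)                                 # vertices touched by M'
        remaining = {e for e in edges if not (e & used)}       # this is G_0
        while len(M) < target and remaining:
            remaining, picked = round_pick(remaining, edge_color, l)
            if not picked:
                break  # cannot happen for a correct opt_size, by Lemma 1 below
            for e in picked:
                M.append(e)
                if len(M) == target:
                    break
        return M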
Now we analyze our algorithm.

3.1 The Analysis

Let M_c = M ∩ E_c. Also, let c∗ be a color c ∈ C at which |OPT_c| is minimized.

Observation 2. α ≤ 1/ℓ ≤ β.

Proof. Let ĉ be a color c ∈ C at which |OPT_c| is maximized. By definition, OPT ≥ ℓ · OPT_{c∗}, i.e., OPT_{c∗}/OPT ≤ 1/ℓ. Thus, α ≤ OPT_{c∗}/OPT ≤ 1/ℓ.
Similarly, OPT ≤ ℓ · OPT_{ĉ}, i.e., OPT_{ĉ}/OPT ≥ 1/ℓ. Thus, β ≥ OPT_{ĉ}/OPT ≥ 1/ℓ.

First we consider the case when OPT ≤ 4ℓ². In this case the returned matching M is a rainbow matching of size exactly ℓ. The existence of such a matching follows by Observation 1. Thus, we immediately obtain a 4ℓ-approximation. As |M_c|/|M| = 1/ℓ in this case, by Observation 2, α ≤ |M_c|/|M| ≤ β. Thus we obtain the desired result.

In the rest of the analysis, we consider the case when OPT > 4ℓ². We start with the following lemma.

Lemma 1. The algorithm successfully computes a matching of size exactly ⌈OPT/(4ℓ)⌉. Moreover, for each color c with OPT_c > 4ℓ and round i ∈ [1, ⌈OPT_c/(4ℓ)⌉ − 1], G_{i−1} contains an edge of color c.

Proof. Note that by Observation 1, the algorithm in Theorem 1 successfully computes a rainbow matching M′ of size ℓ. Now consider any color c such that OPT_c ≤ 4ℓ. For such a color, M already contains at least 1 ≥ ⌈OPT_c/(4ℓ)⌉ edge. Now consider any other color c with OPT_c > 4ℓ.
Consider the rainbow matching M′ computed in the beginning. As |M′| = ℓ, the edges of M′ can be adjacent to at most 2ℓ edges of OPT, since OPT is a matching. In particular, the edges of M′ can be adjacent to at most 2ℓ edges of the set OPT_c. Hence, G_0 contains at least OPT_c − 2ℓ edges of the set OPT_c. Now consider the execution of round i ≥ 1. At most ℓ edges are chosen in this round, and hence these edges can be adjacent to at most 2ℓ edges of OPT_c. It follows that G_i contains at most 2ℓ fewer edges of the set OPT_c than G_{i−1}. As G_0 has at least OPT_c − 2ℓ edges of the set OPT_c and OPT_c > 4ℓ, for each of the first ⌈(OPT_c − 2ℓ)/(2ℓ)⌉ = ⌈OPT_c/(2ℓ)⌉ − 1 rounds, the algorithm will be able to pick an edge of color c. Thus from such a color c with OPT_c > 4ℓ, it can safely pick at least ⌈OPT_c/(2ℓ)⌉ ≥ ⌈OPT_c/(4ℓ)⌉ edges in total. Now, as OPT = Σ_c OPT_c, we have Σ_{c∈C} ⌈OPT_c/(4ℓ)⌉ ≥ ⌈OPT/(4ℓ)⌉. It follows that the algorithm can pick at least ⌈OPT/(4ℓ)⌉ edges. As we stop the algorithm as soon as the size of M reaches ⌈OPT/(4ℓ)⌉, the lemma follows.

Note that the claimed approximation factor trivially follows from the above lemma. Next, we show that M is (α, β)-balanced modulo a small factor that asymptotically tends to 1 with the size of OPT.

Lemma 2.
For each color c ∈ C, |M_c| ≥ OPT_{c∗}/(4ℓ).

Proof. If OPT_{c∗} ≤ 4ℓ, then |M_c| ≥ 1 ≥ OPT_{c∗}/(4ℓ). So, assume that OPT_{c∗} > 4ℓ. Now suppose |M_c| < OPT_{c∗}/(4ℓ) for some c. By Lemma 1, for each of the first ⌈OPT_c/(4ℓ)⌉ − 1 ≥ ⌈OPT_{c∗}/(4ℓ)⌉ − 1 rounds, G_{i−1} contains an edge of color c. It follows that the algorithm was forcibly terminated in some round i ≤ (OPT_{c∗}/(4ℓ)) − 1. Thus, the number of edges chosen from each color c′ ≠ c is at most OPT_{c∗}/(4ℓ). Hence,

    |M| = Σ_{c′≠c} |M_{c′}| + |M_c| < (ℓ − 1) · (OPT_{c∗}/(4ℓ)) + (OPT_{c∗}/(4ℓ)) ≤ ⌈OPT/(4ℓ)⌉.

This contradicts Lemma 1, which states that we select exactly ⌈OPT/(4ℓ)⌉ edges.

Corollary 1. For each color c ∈ C, |M_c|/|M| ≥ α/(1 + 4ℓ/OPT).

Proof. By Lemma 2, |M_c| ≥ OPT_{c∗}/(4ℓ). Hence,

    |M_c|/|M| ≥ (OPT_{c∗}/(4ℓ)) / ⌈OPT/(4ℓ)⌉ ≥ (OPT_{c∗}/(4ℓ)) / (OPT/(4ℓ) + 1) = (OPT_{c∗}/OPT) / (1 + 4ℓ/OPT) ≥ α/(1 + 4ℓ/OPT).

The last inequality follows as OPT satisfies the lower bound for all colors.

Now we turn to proving the upper bound. Let α∗ = OPT_{c∗}/OPT.

Lemma 3.
For each color c ∈ C, |M_c| ≤ (β/α∗) · (OPT_{c∗}/(4ℓ)) + 1.

Proof. Suppose for some c ∈ C, |M_c| > (β/α∗) · (OPT_{c∗}/(4ℓ)) + 1. Then the number of rounds is strictly greater than (β/α∗) · (OPT_{c∗}/(4ℓ)). Now, for any c′, OPT_{c′} ≥ α∗ · OPT and OPT_{c′} ≤ β · OPT. Thus, by the definition of α∗, (β/α∗) · OPT_{c∗} ≥ OPT_{c′}. It follows that, for each c′, the number of rounds is strictly greater than OPT_{c′}/(4ℓ). Hence, for each c′ ∈ C, more than (OPT_{c′}/(4ℓ)) + 1 edges have been chosen. Thus, the total number of edges chosen is strictly larger than

    Σ_{c′∈C} ((OPT_{c′}/(4ℓ)) + 1) ≥ ⌈OPT/(4ℓ)⌉.

This contradicts Lemma 1, which states that we select exactly ⌈OPT/(4ℓ)⌉ edges.

Corollary 2. For each color c ∈ C, |M_c|/|M| ≤ β · (1 + 4ℓ/OPT).

Proof. By Lemma 3,

    |M_c|/|M| ≤ ((β/α∗) · (OPT_{c∗}/(4ℓ)) + 1) / ⌈OPT/(4ℓ)⌉
              ≤ ((β/α∗) · (OPT_{c∗}/(4ℓ)) + (β/α∗)) / (OPT/(4ℓ))
              = (β/α∗) · (OPT_{c∗}/OPT) · (1 + 4ℓ/OPT)
              = (β/α∗) · α∗ · (1 + 4ℓ/OPT)
              = β · (1 + 4ℓ/OPT).

The second inequality follows as α∗ ≤ β, i.e., β/α∗ ≥ 1.

Now let us remove the assumption that we know the size of an optimal solution. Note that ℓ ≤ OPT ≤ n. We probe all values between ℓ and n, and for each such value T we run our algorithm. For each matching M returned by the algorithm, we check whether M is (α/(1 + 4ℓ/T), β · (1 + 4ℓ/T))-balanced. If this is the case, then we keep this solution; otherwise, we discard the solution. Finally, we select a solution of the largest size among the ones not discarded.
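The following small wrapper illustrates this guessing step. It reuses the illustrative fair_matching_alpha_positive and is_balanced sketches from above; none of it is code from the paper.

    def fair_matching_unknown_opt(edges, edge_color, num_colors, n, alpha, beta, colors):
        """Probe every T in [l, n] as a guess for |OPT|; keep matchings meeting the
        relaxed balance bounds and return the largest one kept."""
        l = num_colors
        best = None
        for T in range(l, n + 1):
            M = fair_matching_alpha_positive(edges, edge_color, l, T)
            lo = alpha / (1 + 4 * l / T)
            hi = beta * (1 + 4 * l / T)
            if is_balanced(M, edge_color, lo, hi, colors):
                if best is None or len(M) > len(best):
                    best = M
        return best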
By the above analysis, with T = OPT the matching returned satisfies the desired lower and upper bounds, and has size exactly ⌈OPT/(4ℓ)⌉. Finally, the running time of our algorithm is dominated by the 2^ℓ · n^{O(1)} time needed to compute a rainbow matching, as stated in Theorem 1.

Theorem 2. There is a 2^ℓ · n^{O(1)} time (1/(4ℓ), 1 + 4ℓ/OPT)-approximation algorithm for Proportionally Fair Matching with α > 0.

4 A Polynomial-time Approximation in the β-limited Case

In the β-limited case, we again make use of the Round procedure, but the algorithm is slightly different. Most importantly, we do not apply the algorithm of Theorem 1 in the beginning; thus, our algorithm runs in polynomial time.

The Algorithm. Assume that we know the size of OPT. If OPT ≤ 2ℓ, we pick any edge and return it as the solution. Otherwise, we greedily pick matched edges in rounds using the Round procedure, with the following two cautions. First, if at least β · OPT/(2ℓ) edges of some color have already been chosen, do not choose any more edges of that color. Second, if at least (OPT/(2ℓ)) − 1 edges have already been chosen in total, terminate.
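A minimal sketch of this β-limited procedure, again using the round_pick routine from Section 3 and our own edge representation; only the per-color cap and the global stopping threshold come from the description above, and for simplicity the cap is enforced by filtering edges before each round rather than inside it:

    from collections import Counter

    def fair_matching_beta_limited(edges, edge_color, num_colors, opt_size, beta):
        """Sketch of the polynomial-time beta-limited algorithm (alpha = 0),
        assuming opt_size = |OPT| is known."""
        l = num_colors
        if opt_size <= 2 * l:
            return [next(iter(edges))]            # any single edge suffices
        cap = beta * opt_size / (2 * l)           # caution 1: per-color cap
        stop = opt_size / (2 * l) - 1             # caution 2: stopping threshold
        M, counts = [], Counter()
        remaining = set(edges)
        while len(M) < stop:
            # only offer edges of colors that have not yet reached their cap
            allowed = {e for e in remaining if counts[edge_color[e]] < cap}
            if not allowed:
                break
            _, picked = round_pick(allowed, edge_color, l)
            if not picked:
                break
            for e in picked:
                if len(M) >= stop:
                    break
                M.append(e)
                counts[edge_color[e]] += 1
            used = set().union(*picked)
            remaining = {f for f in remaining if not (f & used)}
        return M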
Now we analyze the algorithm. First note that if OPT ≤ 2ℓ, the returned matching has only one edge; the upper bound is trivially satisfied, and we also obtain a 2ℓ-approximation. Henceforth, we assume that OPT > 2ℓ. Before showing the correctness and analyzing the approximation factor, we show the upper bound for each color. Again let M be the computed matching and M_c = M ∩ E_c. Later we prove the following lemma.

Lemma 4. The algorithm always returns a matching of size at least (OPT/(2ℓ)) − 1.

Assuming this, we have the following observation.

Observation 3. For each color c ∈ C, |M_c|/|M| ≤ β · (1 + 2ℓ/OPT).

Proof. By Lemma 4 and the threshold put on each color in the algorithm,

    |M_c|/|M| ≤ (β · OPT/(2ℓ)) / ((OPT/(2ℓ)) − 1) ≤ β · (1 + 2ℓ/OPT).

The last inequality follows as OPT > 2ℓ.

Next, we prove Lemma 4.

Proof.
Let C_1 be the subset of colors such that for each c ∈ C_1, the algorithm picks at least β · OPT/(2ℓ) edges. Note that the algorithm can terminate in two ways: (i) it has already picked at least (OPT/(2ℓ)) − 1 edges, or (ii) all the edges have been exhausted. If (i) happens, then we are done. We prove that (ii) cannot happen without (i). Suppose (ii) happens, but not (i). Let OPT′ be the subset of OPT containing edges of colors in C′ = C \ C_1. Recall that G_{i−1} is the input graph to the i-th round and G_i is the output graph for i ≥ 1. The number of edges chosen in the i-th round is at most ℓ, and hence these edges can be adjacent to at most 2ℓ edges in G_{i−1}. In particular, G_i contains at most 2ℓ fewer edges of OPT′ than G_{i−1}. It follows that the algorithm can pick at least ⌊OPT′/(2ℓ)⌋ edges of colors in C′. As fewer than β · OPT/(2ℓ) edges are chosen for each color in C′, the algorithm indeed chooses at least ⌊OPT′/(2ℓ)⌋ edges of these colors. The total number of edges chosen by the algorithm is thus

    Σ_{c∈C_1} |M_c| + Σ_{c∈C′} |M_c| ≥ Σ_{c∈C_1} β · OPT/(2ℓ) + ⌊OPT′/(2ℓ)⌋ ≥ Σ_{c∈C_1} OPT_c/(2ℓ) + ⌊OPT′/(2ℓ)⌋ ≥ (OPT/(2ℓ)) − 1.

But this is a contradiction to our assumption, and hence the lemma follows.

Theorem 3. There is a polynomial-time algorithm for Proportionally Fair Matching in the β-limited case that returns a matching of size at least (OPT/(2ℓ)) − 1 where every color appears in at most a β · (1 + 2ℓ/OPT) fraction of the edges.
5 An Exact Algorithm for Proportionally Fair Matching

Theorem 4. There is a 2^{O(k)} · n^{O(1)}-time algorithm that either finds a solution of size k for a Proportionally Fair Matching instance, or determines that none exists.

Proof. We present two different algorithms using the well-known technique of color coding: one for the case α = 0 (the β-limited case), and one for the case α > 0.

β-limited case. We aim to reduce the problem to finding a rainbow matching of size k, which we then solve via Theorem 1. The graph G remains the same; however, the coloring is going to be different. Namely, for each of the original colors c ∈ C we color the edges in E_c uniformly and independently at random from a set of k′ new colors, where k′ = ⌊βk⌋. Thus, the new instance I′ is colored in ℓ · k′ colors. We use the algorithm of Theorem 1 to find a rainbow matching of size k in the colored graph of I′. Clearly, if a rainbow matching M of size k is found, then the same matching M is a β-limited matching of size k in the original coloring. This holds since, by construction, for any original color c ∈ C there are k′ new colors in the edge set E_c, and therefore no more than k′ edges in M ∩ E_c. In the other direction, we show that if there exists a β-limited matching M of size k with respect to the original coloring, then with good probability M is a rainbow matching of size k in the new coloring.
Assume the original colors c_1, . . . , c_t, for some 1 ≤ t ≤ ℓ, have non-empty intersection with M, and for each j ∈ [t] denote k_j = |M ∩ E_{c_j}|. Observe that Σ_{j=1}^{t} k_j = k, and for each j ∈ [t], 1 ≤ k_j ≤ k′.

Claim 1. There exists some δ > 0 such that for each j ∈ [t],

    Pr[ M ∩ (∪_{i=1}^{j} E_{c_i}) is a rainbow matching in I′ ] ≥ exp(−δ · Σ_{i=1}^{j} k_i).    (1)

Proof. We prove the claim by induction on j. For the base case, clearly (1) holds for j = 0. Now, fix j ∈ [t] and assume the statement holds for each j′ < j; we show that (1) also holds for j. Consider the k_j edges of M ∩ E_{c_j}; they are colored uniformly and independently in k′ ≥ k_j colors. By counting possible colorings of M ∩ E_{c_j}, it follows that

    Pr[ M ∩ E_{c_j} is a rainbow matching ] ≥ ((k′)! / (k′ − k_j)!) / (k′)^{k_j} ≥ k_j! / k_j^{k_j} ≥ 2^{−δ·k_j},

where the last bound is by Stirling's formula. Now, since the colors used for E_{c_j} do not appear anywhere else, using the inductive hypothesis we get

    Pr[ M ∩ (∪_{i=1}^{j} E_{c_i}) is a rainbow matching ]
        = Pr[ M ∩ (∪_{i=1}^{j−1} E_{c_i}) is a rainbow matching ] · Pr[ M ∩ E_{c_j} is a rainbow matching ]
        ≥ 2^{−δ·Σ_{i=1}^{j−1} k_i} · 2^{−δ·k_j} = 2^{−δ·Σ_{i=1}^{j} k_i}.

Applying (1) with j = t, we obtain that M is a rainbow matching with probability at least 2^{−δk}. By repeating the reduction above 2^{O(k)} times independently, we achieve that the algorithm succeeds with constant probability.
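The recoloring step of this reduction is straightforward to sketch. The following Python fragment renders one random trial under our illustrative edge representation; the rainbow-matching solver of Theorem 1 is again only assumed as a black box named rainbow_matching.

    import math
    import random

    def beta_limited_exact_trial(edges, edge_color, k, beta):
        """One random color-coding trial for the beta-limited exact algorithm."""
        k_prime = math.floor(beta * k)
        if k_prime == 0:
            return None  # no beta-limited matching of size k can exist
        # Recolor each class E_c with k' fresh colors, chosen uniformly and
        # independently per edge; different original colors never share new colors.
        new_color = {e: (edge_color[e], random.randrange(k_prime)) for e in edges}
        return rainbow_matching(edges, new_color, size=k)  # Theorem 1 (assumed)

Repeating such a trial 2^{O(k)} times and returning any successful matching yields the constant success probability argued above.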
The case α > 0. We observe that in this case, if a matching is fair it necessarily contains at least one edge from each of the groups. Thus, if the number of groups ℓ is greater than k, we immediately conclude that there cannot be a fair matching of size k. Otherwise, we guess how the desired k edges are partitioned between the ℓ groups C = {c_1, . . . , c_ℓ}. That is, we guess the numbers k_j for j ∈ [ℓ] such that Σ_{j=1}^{ℓ} k_j = k, and αk ≤ k_j ≤ βk for each j ∈ [ℓ]. From now on, the algorithm is very similar to the β-limited case. For each group c_j, we color the edges of E_{c_j} from a set of k_j colors uniformly and independently at random, where the colors used for each E_{c_j} are non-overlapping. Now we use the algorithm of Theorem 1 to find a rainbow matching of size k. If there is a rainbow matching M of size k, the same matching is a fair matching of size k for the original instance, since in each E_{c_j} exactly k_j edges are chosen, which is at least αk and at most βk. In the other direction, if there is a fair matching M of size k in the original instance, then by (1) the matching M is a rainbow matching in the new instance with probability at least 2^{−δk}. Again, by repeating the coloring subprocess independently 2^{O(k)} times, we achieve a constant probability of success. Since there are 2^{O(k)} options for partitioning k edges into at most ℓ ≤ k groups, the running time of the whole algorithm is 2^{O(k)} · n^{O(1)}.
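The guessing step simply enumerates the compositions of k into ℓ group sizes lying in [αk, βk]; when ℓ ≤ k there are at most 2^{O(k)} of them. A simple recursive enumeration such as the following (our own illustration) suffices.

    import math

    def feasible_partitions(k, num_groups, alpha, beta):
        """Yield every (k_1, ..., k_l) with sum k and alpha*k <= k_j <= beta*k."""
        lo, hi = math.ceil(alpha * k), math.floor(beta * k)

        def rec(remaining, groups_left, prefix):
            if groups_left == 0:
                if remaining == 0:
                    yield tuple(prefix)
                return
            for kj in range(lo, min(hi, remaining) + 1):
                yield from rec(remaining - kj, groups_left - 1, prefix + [kj])

        yield from rec(k, num_groups, [])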
Finally, we note that the coloring part in both cases can be derandomized in the standard fashion by using perfect hash families [42], leading to a completely deterministic algorithm.

6 Hardness of Approximation for Proportionally Fair Matching

In this section, we show an inapproximability result for Proportionally Fair Matching under the Exponential Time Hypothesis (ETH) [33]. ETH states that 2^{Ω(n)} time is needed to solve any generic 3SAT instance with n variables. For our purpose, we need the following restricted version of 3SAT.

3SAT-3
INPUT: A set of clauses T = {C_1, . . . , C_m} in variables x_1, . . . , x_n, each clause being the disjunction of 3 or 2 literals, where a literal is a variable x_i or its negation ¬x_i. Additionally, each variable appears 3 times.
QUESTION: Is there a truth assignment that simultaneously satisfies all the clauses?

3SAT-3 is known to be NP-hard [48]. We need the following stronger lower bound for 3SAT-3 proved in [20].

Proposition 1 ([20]). Under ETH, 3SAT-3 cannot be solved in 2^{o(n)} time.
We reduce 3SAT-3 to Proportionally Fair Matching in a way that rules out any approximation for the latter problem in 2^{o(ℓ)} · n^{O(1)} time. Our reduction is as follows. For each clause C_i, we have a color i. Also, we have n − 1 additional colors m + 1, . . . , m + n − 1. Thus, the set of colors is C = {1, . . . , m + n − 1}. For each variable x_i, we construct a gadget, which is a 3-path (a path with 3 edges). Note that, as x_i appears 3 times in total, it either appears twice in its normal form or twice in its negated form. Let C_{i_1}, C_{i_2} and C_{i_3} be the clauses where x_i appears, and suppose it appears in C_{i_1} and C_{i_3} in one form and in C_{i_2} in the other form. We construct a 3-path P_i for x_i where the j-th edge has color i_j for 1 ≤ j ≤ 3. Additionally, we construct n − 1 3-paths Q_{i,i+1} for 1 ≤ i ≤ n − 1; all edges of Q_{i,i+1} are of color m + i. Finally, we glue all the paths together in the following way to obtain a single path. For each 1 ≤ i ≤ n − 1, we glue Q_{i,i+1} in between P_i and P_{i+1} by identifying the last vertex of P_i with the first vertex of Q_{i,i+1} and the last vertex of Q_{i,i+1} with the first vertex of P_{i+1}. Thus we obtain a path P with exactly 3(n + n − 1) = 6n − 3 edges. Finally, we set α = β = 1/(m + n − 1).
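The construction is mechanical, and the following sketch builds the colored path from a 3SAT-3 instance. It assumes the three clause indices of each variable are given already ordered so that the middle one carries the opposite sign of the other two; this encoding, and the code itself, are our own illustration rather than anything specified in the paper.

    def build_reduction(n, m, occurrences):
        """occurrences[i] = (i1, i2, i3): clause indices of variable i (1-indexed),
        where the occurrence in i2 has the opposite sign of those in i1 and i3.

        Returns (edges, edge_color, alpha) for the Proportionally Fair Matching
        instance on a single path, with alpha = beta = 1/(m + n - 1)."""
        edges, edge_color = [], {}
        v = 0  # running vertex id along the path

        def add_edge(color):
            nonlocal v
            e = frozenset({v, v + 1})
            edges.append(e)
            edge_color[e] = color
            v += 1

        for i in range(1, n + 1):
            for c in occurrences[i]:        # gadget P_i: edge colors i1, i2, i3
                add_edge(c)
            if i < n:                       # connector Q_{i,i+1}: three edges of color m + i
                for _ in range(3):
                    add_edge(m + i)
        return edges, edge_color, 1.0 / (m + n - 1)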
Lemma 5. There is a satisfying assignment for the clauses in 3SAT-3 if and only if there is an (α, β)-balanced matching of size at least m + n − 1.

Proof. Suppose there is a satisfying assignment for all the clauses. For each clause C_j, consider a variable, say x_i, that satisfies C_j. Then there is an edge of color j on P_i; add this edge to a set M. Thus, after this step, M contains exactly one edge of color j for 1 ≤ j ≤ m. Also, note that for each path P_i, if the middle edge is chosen, then no other edge from P_i can be chosen. This is true, as the variable x_i can either satisfy the clauses where it appears in its normal form or the clauses where it appears in its negated form, but not both types of clauses. Hence, M is a matching. Finally, for each path Q_{i,i+1}, we add its middle edge to M; note that M still remains a matching. Moreover, M contains exactly one edge of color j for 1 ≤ j ≤ m + n − 1.
As α = β = 1/(m + n − 1), M is an (α, β)-balanced matching.

Now suppose there is an (α, β)-balanced matching M of size at least m + n − 1. First, we show that |M| = m + n − 1. Note that if |M| > m + n − 1, then the only possibility is that |M| = 2(m + n − 1), as α = β and at most 2 edges of color j can be picked in any matching for m + 1 ≤ j ≤ m + n − 1. Suppose |M| = 2(m + n − 1). Then from each Q_{i,i+1}, M contains the first and the third edge. This implies that from each P_t, 1 ≤ t ≤ n, we can pick at most one edge. Thus, the total number of edges in M is at most 2(n − 1) + n. It follows that 2m + 2n − 2 ≤ 2n − 2 + n, or n ≥ 2m. Now, in 3SAT-3 the total number of literals is 3n and at most 3m, as each variable appears 3 times and each clause contains at most 3 literals. This implies n ≤ m, and we obtain a contradiction. Thus, |M| = m + n − 1.

Now, consider any P_i. In the first case, the first and third edges of P_i correspond to the literal x_i and, hence, the middle edge corresponds to the literal ¬x_i. If the middle edge is in M, assign 0 to x_i; otherwise, assign 1 to x_i. In the other case, if the middle edge is in M, assign 1 to x_i; otherwise, assign 0 to x_i. We claim that the constructed assignment satisfies all the clauses.
Consider any clause C_j, and let e ∈ P_i be the edge in M of color j, for 1 ≤ j ≤ m. Note that e may or may not be the middle edge of P_i. In either case, if e corresponds to ¬x_i, we assigned 0 to x_i, and if e corresponds to x_i, we assigned 1 to x_i. Thus, in either case, C_j is satisfied. This completes the proof of the lemma.

Note that for a 3SAT-3 instance the total number of literals is 3n. As each clause contains at least 2 literals, m ≤ 3n/2. Now, for the instances constructed in the above proof, the number of colors is ℓ = m + n − 1 ≤ 3n/2 + n − 1 = 5n/2 − 1. Thus, the above lemma along with Proposition 1 shows that it is not possible to decide whether there is an (α, β)-balanced matching of a given size in time 2^{o(ℓ)} · n^{O(1)}. Using this, we also show that no 2^{o(ℓ)} · n^{O(1)} time approximation algorithm is possible either. Suppose there is a 2^{o(ℓ)} · n^{O(1)} time γ-approximation algorithm, where γ ≤ 1. For our constructed path instances, we apply this algorithm to obtain a matching. Note that the γ-approximate solution M must contain at least one edge of every color, as α = β. By the proof of the above lemma, |M| is then exactly m + n − 1. Hence, using this algorithm, we can decide in 2^{o(ℓ)} · n^{O(1)} time whether there is an (α, β)-balanced matching of size m + n − 1.
Note that for a 3SAT-3 instance the total number of literals is 3n. As each clause contains at least 2 literals, m ≤ 3n/2. Now, for the instances constructed in the above proof, the number of colors ℓ = m + n − 1 ≤ 3n/2 + n − 1 = 5n/2 − 1. Thus, the above lemma along with Proposition 1 shows that it is not possible to decide whether there is an (α, β)-balanced matching of a given size in time 2^{o(ℓ)} n^{O(1)}. Using this, we also show that no 2^{o(ℓ)} n^{O(1)} time approximation algorithm is possible either. Suppose there is a 2^{o(ℓ)} n^{O(1)} time γ-approximation algorithm, where γ < 1. For our constructed path instances, we apply this algorithm to obtain a matching. Note that the γ-approximate solution M must contain at least one edge of every color, as α = β. By the proof of the above lemma, |M| is exactly m + n − 1. Hence, using this algorithm, we can decide in 2^{o(ℓ)} n^{O(1)} time whether there is an (α, β)-balanced matching of size m + n − 1. But this is a contradiction, which leads to the following theorem.

Theorem 5. For any γ > 1, under ETH, there is no 2^{o(ℓ)} n^{O(1)} time γ-approximation algorithm for Proportionally Fair Matching, even on paths.

7 Conclusions

In this paper, we study the notion of proportional fairness in the context of matchings in graphs, which has been studied by Chierichetti et al. [15]. We obtained approximation and exact algorithms for the proportionally fair matching problem. We also complement these results by showing hardness results. It would be interesting to obtain an o(ℓ)- or a true O(ℓ)-approximation for Proportionally Fair Matching, improving our result. As evident from our hardness result, there is a lower bound of 2^{Ω(ℓ)} n^{O(1)} on the running time of such an algorithm.

Acknowledgments. Most of this work was done when all four authors were affiliated with the University of Bergen, Norway. The research leading to these results has received funding from the Research Council of Norway via the project MULTIVAL, and from the European Research Council (ERC) via grant LOPPRE, reference 819416.

References
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Wu, Fair regression: Quantitative definitions and reduction-based algorithms, in International Conference on Machine Learning, PMLR, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 120–129.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [2] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Ahmadi, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Ahmed, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Dickerson, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Fuge, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Khuller, An algorithm for multi-attribute diverse matching, in Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI 2020, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Bessiere, ed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', ijcai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='org, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3–9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [3] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Alon, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Yuster, and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Zwick, Color-coding, Journal of the ACM (JACM), 42 (1995), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 844–856.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [4] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Angwin, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Larson, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Mattu, and L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kirchner, Machine bias: There’s software used across the country to predict future criminals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' and it’s biased against blacks, ProPublica, (May 23, 2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [5] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Bandyapadhyay, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Fomin, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Simonov, On coresets for fair clustering in metric and euclidean spaces and their applications, CoRR, abs/2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='10137 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [6] X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Bei, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Liu, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Poon, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Wang, Candidate selections with proportional fair- ness constraints, in Proceedings of the 19th International Conference on Autonomous Agents and Multiagent Systems, AAMAS ’20, Auckland, New Zealand, May 9-13, 2020, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Seghrouchni, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Sukthankar, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' An, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Yorke-Smith, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', International Foundation for Autonomous Agents and Multiagent Systems, 2020, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 150–158.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [7] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Bera, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Chakrabarty, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Flores, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Negahbani, Fair algorithms for cluster- ing, in Advances in Neural Information Processing Systems, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 4954–4965.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [8] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Berger, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Bonifaci, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Grandoni, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Sch¨afer, Budgeted matching and bud- geted matroid intersection via the gasoline puzzle, Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Program.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', 128 (2011), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 355–372.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [9] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Berk, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Heidari, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Jabbari, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Joseph, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kearns, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Morgenstern, S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Neel, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Roth, A convex framework for fair regression, CoRR, abs/1706.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='02409 (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [10] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Buolamwini and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Gebru, Gender shades: Intersectional accuracy disparities in com- mercial gender classification, in Conference on Fairness, Accountability and Transparency, FAT 2018, 23-24 February 2018, New York, NY, USA, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Friedler and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Wilson, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 81 of Proceedings of Machine Learning Research, PMLR, 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 77–91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [11] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Calders and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Verwer, Three naive bayes approaches for discrimination-free classifi- cation, Data Mining and Knowledge Discovery, 21 (2010), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 277–292.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [12] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Celis, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Huang, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' K.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Vishnoi, Multiwinner voting with fairness constraints, in Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI 2018, July 13-19, 2018, Stockholm, Sweden, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Lang, ed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', ijcai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='org, 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 144–151.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [13] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Celis, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Straszak, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Vishnoi, Ranking with fairness constraints, in 45th International Colloquium on Automata, Languages, and Programming, ICALP 2018, July 9-13, 2018, Prague, Czech Republic, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Chatzigiannakis, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kaklamanis, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Marx, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Sannella, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 107 of LIPIcs, Schloss Dagstuhl - Leibniz-Zentrum f¨ur Informatik, 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 28:1–28:15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 11 [14] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Charlin and R.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Zemel, The toronto paper matching system: an automated paper- reviewer assignment system, (2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [15] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Chierichetti, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kumar, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Lattanzi, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Vassilvitskii, Fair clustering through fairlets, in Advances in Neural Information Processing Systems, 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 5029–5037.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 10 [16] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Chierichetti, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kumar, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Lattanzi, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Vassilvitskii, Matroids, matchings, and fairness, in The 22nd International Conference on Artificial Intelligence and Statistics, AISTATS 2019, 16-18 April 2019, Naha, Okinawa, Japan, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Chaudhuri and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Sugiyama, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 89 of Proceedings of Machine Learning Research, PMLR, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 2212–2220.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 2 [17] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Chouldechova, Fair prediction with disparate impact: A study of bias in recidivism prediction instruments, Big data, 5 (2017), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 153–163.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [18] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Corbett-Davies, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Pierson, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Feller, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Goel, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Huq, Algorithmic decision making and the cost of fairness, in Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, Halifax, NS, Canada, August 13 - 17, 2017, ACM, 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 797–806.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 3 [19] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Crowson, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Atkinson, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Therneau, Assessing calibration of prognostic risk scores, Statistical methods in medical research, 25 (2016), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1692–1706.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [20] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Cygan, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Marx, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Pilipczuk, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Pilipczuk, Hitting forbidden subgraphs in graphs of bounded treewidth, Information and Computation, 256 (2017), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 62–82.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 9 [21] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Datta, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Tschantz, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Datta, Automated experiments on ad privacy settings: A tale of opacity, choice, and discrimination, Proceedings on privacy enhancing technologies, 2015 (2015), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 92–112.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [22] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Dwork, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Hardt, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Pitassi, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Reingold, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Zemel, Fairness through awareness, in Proceedings of the 3rd innovations in theoretical computer science conference, 2012, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 214–226.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 3 [23] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Dwork and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Ilvento, Group fairness under composition, in Proceedings of the 2018 Conference on Fairness, Accountability, and Transparency (FAT* 2018), 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [24] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Ebadian, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kahng, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Peters, and N.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Shah, Optimized distortion and proportional fairness in voting, in Proceedings of the 23rd ACM Conference on Economics and Computa- tion, 2022, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 563–600.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [25] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Feldman, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Friedler, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Moeller, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Scheidegger, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Venkatasubra- manian, Certifying and removing disparate impact, in proceedings of the 21th ACM SIGKDD international conference on knowledge discovery and data mining, 2015, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 259–268.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 2, 3 [26] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Freeman, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Micha, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Shah, Two-sided matching meets fair division, (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [27] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Garb, Race bias, social class bias, and gender bias in clinical judgment, Clinical Psychology: Science and Practice, 4 (1997), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 99–120.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [28] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Garc´ıa-Soriano and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Bonchi, Fair-by-design matching, Data Min.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Knowl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Discov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', 34 (2020), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1291–1335.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 3 [29] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Goel, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Yaghini, and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Faltings, Non-discriminatory machine learning through convex fairness criteria, in Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, AIES 2018, New Orleans, LA, USA, February 02-03, 2018, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Furman, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Marchant, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Price, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Rossi, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', ACM, 2018, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 116.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 12 [30] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Gupta, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Roy, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Saurabh, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Zehavi, Parameterized algorithms and kernels for rainbow matching, Algorithmica, 81 (2019), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1684–1698.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3, 4 [31] C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Huang, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kavitha, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Mehlhorn, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Michail, Fair matchings and related problems, Algorithmica, 74 (2016), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1184–1203.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 3 [32] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Huang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Jiang, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Vishnoi, Coresets for clustering with fairness constraints, in Advances in Neural Information Processing Systems, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 7589–7600.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [33] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Impagliazzo and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Paturi, On the complexity of k-sat, Journal of Computer and System Sciences, 62 (2001), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 367–375.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 2, 9 [34] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Joseph, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kearns, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Morgenstern, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Roth, Fairness in learning: Classic and contextual bandits, in Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Lee, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Sugiyama, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' von Luxburg, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Guyon, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Garnett, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', 2016, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 325–333.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [35] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kamada and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kojima, Fair matching under constraints: Theory and applications, (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 3 [36] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kamishima, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Akaho, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Sakuma, Fairness-aware learning through regularization approach, in Data Mining Workshops (ICDMW), 2011 IEEE 11th International Conference on, Vancouver, BC, Canada, December 11, 2011, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Spiliopoulou, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Wang, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Cook, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Pei, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Wang, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Za¨ıane, and X.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Wu, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', IEEE Computer Society, 2011, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 643–650.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [37] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kesavan, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Periyathambi, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Chokkalingam, A proportional fair scheduling strategy using multiobjective gradient-based african buffalo optimization algorithm for effective resource allocation and interference minimization, International Journal of Communication Systems, 35 (2022), p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' e5003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [38] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Klaus and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Klijn, Procedurally fair and stable matching, Economic Theory, 27 (2006), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 431–447.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 3 [39] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kleinberg, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Mullainathan, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Raghavan, Inherent trade-offs in the fair de- termination of risk scores, in 8th Innovations in Theoretical Computer Science Conference (ITCS 2017), Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1, 3 [40] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Kurata, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Hamada, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Iwasaki, and M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Yokoo, Controlled school choice with soft bounds and overlapping types, Journal of Artificial Intelligence Research, 58 (2017), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 153– 184.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 [41] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Lu, The optimization of automated container terminal scheduling based on proportional fair priority, Mathematical Problems in Engineering, 2022 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [42] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Naor, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Schulman, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Srinivasan, Splitters and near-optimal derandom- ization, in Proceedings of the 36th Annual Symposium on Foundations of Computer Science (FOCS 1995), IEEE, 1995, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 182–191.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 9 [43] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Nguyen, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Baiou, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Nguyen, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Vo, Nash fairness solutions for balanced tsp, in 10th International Network Optimization Conference (INOC), 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [44] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Ristoski, P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Petrovski, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Mika, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Paulheim, A machine learning approach for product matching and categorization, Semantic web, 9 (2018), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 707–728.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 1 13 [45] W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' St-Arnaud, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Carvalho, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Farnadi, Adaptation, comparison and prac- tical implementation of fairness schemes in kidney exchange programs, arXiv preprint arXiv:2207.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content='00241, (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [46] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Stamoulis, Approximation algorithms for bounded color matchings via convex decompo- sitions, in Mathematical Foundations of Computer Science 2014 - 39th International Sympo- sium, MFCS 2014, Budapest, Hungary, August 25-29, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Proceedings, Part II, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Csuhaj- Varj´u, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Dietzfelbinger, and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' ´Esik, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 8635 of Lecture Notes in Computer Science, Springer, 2014, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 625–636.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [47] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Thanh, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Ruggieri, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Turini, k-nn as an implementation of situation testing for discrimination discovery and prevention, in Proceedings of the 17th ACM SIGKDD Inter- national Conference on Knowledge Discovery and Data Mining, San Diego, CA, USA, August 21-24, 2011, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Apt´e, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Ghosh, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Smyth, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', ACM, 2011, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 502–510.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' 3 [48] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Yannakakis, Node- and edge-deletion np-complete problems, in Proceedings of the 10th Annual ACM Symposium on Theory of Computing, May 1-3, 1978, San Diego, California, USA, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Lipton, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Burkhard, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Savitch, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Friedman, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=' Aho, eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNE2T4oBgHgl3EQfZQdS/content/2301.03862v1.pdf'} +page_content=', ACM, 1978, pp.' 
diff --git a/ZNFRT4oBgHgl3EQfPjcf/content/tmp_files/2301.13517v1.pdf.txt b/ZNFRT4oBgHgl3EQfPjcf/content/tmp_files/2301.13517v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..088b9e931206169b1a67620d5b0615afde8c6d5a
--- /dev/null
+++ b/ZNFRT4oBgHgl3EQfPjcf/content/tmp_files/2301.13517v1.pdf.txt
@@ -0,0 +1,6650 @@
+A cut finite element method for the heat equation on overlapping meshes:
+L2-analysis for dG(0) mesh movement
+Mats G. Larson, Carl Lundholm
+Abstract
+We present a cut finite element method for the heat equation on two overlapping meshes.
+By overlapping meshes we mean a mesh hierarchy with a stationary background mesh at the
+bottom and an overlapping mesh that is allowed to move around on top of the background
+mesh. Overlapping meshes can be used as an alternative to costly remeshing for problems
+with changing or evolving interior geometry. In this paper the overlapping mesh is
+prescribed a dG(0) movement, meaning that its location as a function of time is
+discontinuous and piecewise constant. For the discrete function space, we use continuous
+Galerkin in space and discontinuous Galerkin in time, with the addition of a discontinuity
+on the boundary between the two meshes. The finite element formulation is based on
+Nitsche's method. The dG(0) mesh movement results in a space-time discretization with a
+product structure between space and time, which allows existing analysis methodologies to
+be applied with only minor modifications. We follow the analysis methodology presented by
+Eriksson and Johnson in [12, 13], here referred to as an L2-analysis because of the norm
+used in the error analysis. The main modification is a shift operator that generalizes
+the Ritz projection operator and handles the shift in the overlapping mesh's location at
+the discrete times. The L2-analysis consists of the corresponding standard stability
+estimates and an a priori error estimate that is of optimal order with respect to both
+the time step and the mesh size.
+We also present numerical results for a problem in one spatial dimension that verify the
+analytic error convergence orders.
+Keywords: CutFEM, overlapping meshes, moving meshes, parabolic problem, error analysis
+arXiv:2301.13517v1 [math.NA] 31 Jan 2023
+Contents
+1 Introduction
+2 Problem
+3 Method
+3.1 Preliminaries
+3.2 Finite element spaces
+3.2.1 The semi-discrete spaces Vh(t) and Vh(In)
+3.2.2 The fully discrete spaces V_h^n and Vh
+3.3 Finite element formulation
+4 Analytic preliminaries
+4.1 The bilinear form Ah,t
+4.1.1 Standard operators that map to Vh(t)
+4.1.2 Shift operator
+4.2 The bilinear form Bh
+4.3 Consistency and Galerkin orthogonality
+4.4 A discrete dual problem
+5 Stability analysis
+5.1 The basic stability estimate
+5.2 The strong stability estimate
+5.3 Proof of Theorem 5.1 (The main stability estimate)
+6 A priori error analysis
+7 Numerical results
+7.1 Illustrative examples
+7.2 Convergence study
+7.3 dG(0) in time
+7.3.1 Illustrative examples
+7.3.2 Convergence study
+7.4 dG(1) in time
+7.4.1 Illustrative examples
+7.4.2 Convergence study
+7.5 Comparison with analytic results
+8 Conclusions
+A Analytic tools
+B Interpolation
+B.1 Spatial interpolation operator
+B.2 Temporal interpolation operator
+References
+1 Introduction
+The finite element method (FEM) is a well-known tool for computing approximate solutions
+of partial differential equations (PDEs). It is particularly suitable for PDE-problems
+with complicated geometry since it allows for unstructured domain-fitted meshes.
Unstructured +meshes are more computationally expensive to generate and memory demanding to store +than structured meshes since there is no underlying structure that may be used. +Cut +finite element methods (CutFEMs) enable the use of structured meshes in problems with +complicated geometry. CutFEM may also make costly remeshing redundant for problems +with changing or evolving geometries or for other situations involving meshing such as +adaptive mesh refinement. Using standard FEM for such problems usually means that a +new mesh has to be generated when the geometry has changed too much. With CutFEM +the geometry may be represented by an interface whose location in relation to the mesh may +be arbitrary, thus allowing the same mesh to be used for different or changing interfaces. +A common type of problem with changing geometry is one where there is an object in the +solution domain that moves relatively to the domain boundary. An advantageous CutFEM +approach to such problems is to use overlapping meshes, meaning two or more meshes +ordered in a mesh hierarchy. This is also called composite grids/meshes and multimesh in +the literature but the meaning is the same. The idea is to first remove the object from +the domain and to generate a stationary background mesh in the empty solution domain. +The background mesh may thus be a nicely structured mesh. +A second mesh is then +generated around the object. The mesh containing the object is then placed “on top” of +the background mesh, creating a mesh hierarchy. The movement of the object will thus +also cause its encapsulating mesh to move. +Over the past two decades, a theoretical foundation for the formulation of stabilized +CutFEM has been developed by extending the ideas of Nitsche, presented in [1], to a +general weak formulation of the interface conditions, thereby removing the need for domain- +fitted meshes. The foundations of CutFEM were presented in [2] and then extended to +overlapping meshes in [3]. The CutFEM methodology has since been developed and applied +to a number of important multiphysics problems. See for example [4, 5, 6, 7]. For CutFEM +on overlapping meshes in particular, see for example [8, 9, 10, 11]. So far, only CutFEM +for stationary PDE-problems on overlapping meshes have been developed and analysed to +a satisfactory degree, thus leaving analogous work for time-dependent PDE-problems to be +desired. +The work presented here is intended to be an initial part of developing CutFEM for +time-dependent PDE-problems on overlapping meshes. We consider CutFEM for the heat +equation on two overlapping meshes: one stationary background mesh and one moving +overlapping mesh. Depending on how the mesh movement is represented, quite different +space-time discretizations may arise, allowing for different types of analyses of the Cut- +FEM. In general the mesh movement may either be continuous or discontinuous. We have +considered the simplest case of both of these two types, which we refer to as cG(1) and +dG(0) mesh movement, where cG(r) and dG(r) stand for continuous and discontinuous +Galerkin of order r, respectively. +The mesh movements are named after what type of +3 + +function the location of the overlapping mesh is when considered as a function of time. +Thus cG(1) mesh movement means that the location of the overlapping mesh as a function +of time is continuous and piecewise linear, and dG(0) mesh movement means that it is +discontinuous and piecewise constant. 
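To make the difference between the two prescriptions concrete, the following small Python sketch evaluates the position of the overlapping mesh from a given velocity mu in one spatial dimension. It is not part of the paper: the function names, the one-dimensional setting, and the trapezoidal quadrature are our own illustrative choices, and the convention that the dG(0) position on a slab equals the end-of-slab position anticipates the prescription given later in Section 3.1.

import numpy as np

def dg0_slab_positions(x0, mu, t_nodes, quad_pts=33):
    # dG(0) position of the overlapping mesh on each slab I_n = (t_{n-1}, t_n]:
    # constant on I_n, equal to x0 plus the accumulated displacement
    # sum_{m <= n} int_{I_m} mu(t) dt, i.e. the end-of-slab position.
    positions, x = [], float(x0)
    for a, b in zip(t_nodes[:-1], t_nodes[1:]):
        ts = np.linspace(a, b, quad_pts)
        vals = np.array([mu(t) for t in ts])
        x = float(x + np.sum(0.5 * (vals[1:] + vals[:-1]) * np.diff(ts)))  # trapezoidal rule
        positions.append(x)
    return positions

def cg1_position(x0, mu, t_nodes, t, quad_pts=33):
    # cG(1) position at time t: continuous and piecewise linear in time,
    # interpolating the accumulated end-of-slab positions.
    pos = [float(x0)] + dg0_slab_positions(x0, mu, t_nodes, quad_pts)
    n = int(np.clip(np.searchsorted(t_nodes, t), 1, len(t_nodes) - 1))
    a, b = t_nodes[n - 1], t_nodes[n]
    w = (t - a) / (b - a)
    return (1.0 - w) * pos[n - 1] + w * pos[n]

if __name__ == "__main__":
    t_nodes = np.linspace(0.0, 1.0, 5)           # four slabs on [0, 1]
    mu = lambda t: 0.5                           # constant velocity, d = 1
    print(dg0_slab_positions(0.0, mu, t_nodes))  # [0.125, 0.25, 0.375, 0.5]
    print(cg1_position(0.0, mu, t_nodes, 0.625)) # 0.3125

With a constant velocity mu = 0.5 on [0, 1] split into four slabs, the dG(0) position is constant on each slab (0.125, 0.25, 0.375, 0.5), while the cG(1) position varies continuously in time, e.g. 0.3125 at t = 0.625.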
+In a very first study, we considered cG(1) mesh movement and attempted to follow the
+analysis methodology presented by Eriksson and Johnson in [12, 13], here referred to as
+an L2-analysis because of the norm used in the error analysis. However, due to the
+space-time discretization resulting from the cG(1) mesh movement, the L2-analysis failed.
+The study, containing partial results of the incomplete L2-analysis, was presented in the
+MSc-thesis [14]. With that very first study as a starting point, we retreated in two
+directions by considering a less demanding energy analysis and the simpler dG(0) mesh
+movement, meaning a less complicated space-time discretization. This has resulted in two
+new studies with complete analyses. One study concerns an energy analysis for cG(1) mesh
+movement, and the other concerns an L2-analysis for dG(0) mesh movement. This paper
+presents the latter. Table 1 gives an overview of the various studies of CutFEM for the
+heat equation on two overlapping meshes performed so far.
+
+                     dG(0) mesh movement     cG(1) mesh movement
+  Energy analysis    -                       ✓
+  L2-analysis        This paper ✓            MSc-thesis [14] ✗
+
+Table 1: Overview of studies of CutFEM for the heat equation on two overlapping meshes
+based on analysis and mesh movement type. The checkmark indicates a complete analysis
+and the x-mark one that is currently incomplete.
+
+In this paper, the overlapping mesh is prescribed a dG(0) movement. This results in a
+discretization that has a product structure between space and time in each slab. Standard
+analysis methodology therefore works with some modifications. We follow the L2-analysis
+presented by Eriksson and Johnson in [12, 13]. The main modification to the standard
+analysis is the use of a shift operator that generalizes the Ritz projection operator.
+The shift operator is used to handle the shift in the overlapping mesh's location at
+discrete times. The general analysis consists of stability and error estimates. The error
+analysis concerns an optimal order a priori error estimate of the L2-norm of the
+approximation error at the final time. This estimate shows that the method preserves the
+so-called superconvergence of the error with respect to the time step.
+The outline of the rest of this manuscript is as follows. In Section 2, the original
+PDE-problem is formulated. In Section 3, the corresponding CutFEM is presented. In
+Section 4, necessary tools for the analysis are presented, such as bilinear forms and
+operators. In Section 5, we present and prove basic and strong stability for the finite
+element solution. In Section 6, we present and prove an optimal order a priori error
+estimate. In Section 7, we present numerical results for a problem in one spatial
+dimension that verify the analytic convergence orders. The last part of this manuscript
+is an appendix where we present tools used in the analysis.
+2 Problem
+For d = 1, 2, or 3, let Ω0 ⊂ Rd be a bounded convex domain, i.e., a connected open set,
+with polygonal boundary ∂Ω0. Let T > 0 be a given final time. Let x ∈ Rd denote the
+spatial coordinate and t ∈ R denote time. Furthermore, let G ⊂ Ω0 ⊂ Rd be another
+bounded domain with polygonal boundary ∂G. We let the location of G be time-dependent by
+prescribing for G a velocity µ : [0, T] → Rd. This means that G and ∂G are functions of
+time, i.e., G = G(t) and ∂G = ∂G(t) for t ∈ [0, T]. We point out that the shape of G
+remains the same for all times.
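Since µ is a function of time only, the same velocity is prescribed to every point of G, and the statement that the shape of G is preserved can be made precise as a rigid translation. The following display is our paraphrase and is not quoted from the paper:

G(t) = \left\{\, x + \int_0^t \mu(s)\,\mathrm{d}s \;:\; x \in G(0) \,\right\}, \qquad t \in [0, T].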
From Ω0 and G, we define the following two domains: +: Ω1 +: Ω2 +: Γ +Ω0 +µ +G +Figure 1: Partition of Ω0 into Ω1 (blue) and +Ω2 (red) for d = 2 with G moving with veloc- +ity µ. +Ω1 := Ω0 \ (G ∪ ∂G), +(2.1) +Ω2 := Ω0 ∩ G, +(2.2) +with boundaries ∂Ω1 and ∂Ω2, respectively. +For i = 1, 2, the set Ωi and its boundary +∂Ωi are functions of time, i.e., Ωi = Ωi(t) +and ∂Ωi = ∂Ωi(t) for t ∈ [0, T]. Let the +common boundary between Ω1 and Ω2 be +Γ := ∂Ω1 ∩ ∂Ω2, +(2.3) +where, of course, Γ also is a function of +time, i.e., Γ = Γ(t), for t ∈ [0, T]. Note +that for any t ∈ [0, T], we have the parti- +tion +Ω0 = Ω1(t) ∪ Γ(t) ∪ Ω2(t). +(2.4) +An illustration of the partition of Ω0 as a result of G’s location is shown in Figure 1. +We consider the heat equation in Ω0 × (0, T] with given source function f, homoge- +neous Dirichlet boundary conditions, and initial value u0. +The problem is: Find u ∈ +H1((0, T], L2(Ω0)) ∩ L2((0, T], H2(Ω0) ∩ H1 +0(Ω0)) such that +� +� +� +� +� +˙u − ∆u = f +in Ω0 × (0, T], +u = 0 +on ∂Ω0 × (0, T], +u = u0 +in Ω0 × {0}, +(2.5) +where ˙u = ∂tu, ∆ is the Laplace operator, the source function f ∈ L2((0, T], Ω0), and the +initial data u0 ∈ H2(Ω0) ∩ H1 +0(Ω0). +3 +Method +3.1 +Preliminaries +Let T0 and TG be quasi-uniform simplicial meshes of Ω0 and G, respectively. We denote by +hK the diameter of a simplex K. We partition the time interval (0, T] quasi-uniformly into +5 + +N subintervals In = (tn−1, tn] of length kn = tn − tn−1, where 0 = t0 < t1 < . . . < tN = T +and n = 1, . . . , N. We assume the following space-time quasi-uniformity of the spatial and +temporal discretizations: For h = maxK∈T0∪TG{hK}, and k = max1≤n≤N{kn}, assume that +there exist constants C1, C2 > 0 such that +h ≤ C1kmin, +k ≤ C2hmin, +(3.1) +where kmin = min1≤n≤N{kn}, and hmin = minK∈T0∪TG{hK}. We next define the following +slabwise space-time domains: +S0,n := Ω0 × In, +(3.2) +Si,n := {(x, t) ∈ S0,n : x ∈ Ωi(t)}, +i = 1, 2, +(3.3) +¯Γn := {(s, t) ∈ S0,n : s ∈ Γ(t)}. +(3.4) +In general we will use bar, i.e.,¯·, to denote something related to space-time, such as domains +and variables. In addition to the “visible” and uncovered domains Ω1(t) and Ω2(t), we also +consider the covered overlap domain ΩO(t). To define it, we will use the set of simplices +T0,Γ(t) := {K ∈ T0 : K ∩ Γ(t) ̸= ∅}, i.e., all simplices in T0 that are cut by Γ at time t. We +define the overlap domain ΩO(t) for a time t ∈ [0, T] by +ΩO(t) := +� +K∈T0,Γ(t) +K ∩ Ω2(t). +(3.5) +As a discrete counterpart to the movement of the domain G, we prescribe a dG(0) +movement for the overlapping mesh TG. By this we mean that the location of the overlap- +ping mesh TG is a dG(0) function with respect to time, i.e., discontinuous on [0, T] and +constant on each In. This means that on each In the position of TG is fixed, but changes +from In−1 to In. We take this change to be +� +In µ(t) dt, i.e., the total change in the location +of G over In. An illustration of the slabwise space-time domains Si,n, defined by (3.3), +with dG(0) mesh movement is shown in Figure 2. The dG(0) mesh movement also results +in the following: +Ωi,n = Ωi(tn) = Ωi(t), +∀t ∈ In, +(3.6a) +Γn = Γ(tn) = Γ(t), +∀t ∈ In, +(3.6b) +ΩO,n = ΩO(tn) = ΩO(t), +∀t ∈ In. +(3.6c) +6 + +S1,n−1 +x2 +x1 +t +S2,n−1 +tn−2 +S1,n +tn +S2,n +tn−1 +Figure 2: Space-time slabs with dG(0) mesh movement. +3.2 +Finite element spaces +Let {ϕ0,j}j be the set of polynomial interior basis functions of degree p for T0 and let +{ϕG,j}j be the set of polynomial basis functions of degree p for TG. 
Note that the basis +functions of TG depend on time as well as space, since the position of TG changes globally +in time. +3.2.1 +The semi-discrete spaces Vh(t) and Vh(In) +For t ∈ [0, T], we define the semi-discrete finite element spaces Vh,0 and Vh,G as the spaces +of continuous piecewise polynomials of degree ≤ p on T0 and TG, respectively. We also let +the functions in Vh,0 be zero on ∂Ω0. For t ∈ [0, T], let +Vh,0 := +� +v : v(x, t) = +� +j +Vj(t)ϕ0,j(x), Vj : [0, T] → R, ∀j +� +, +(3.7) +Vh,G := +� +v : v(x, t) = +� +j +Vj(t)ϕG,j(x, t), Vj : [0, T] → R, ∀j +� +. +(3.8) +We now use these two spaces to define the broken finite element space Vh(t) as the space +of functions that on Ω1(t) are restrictions of functions in Vh,0 to Ω1(t), and on Ω2(t) are +restrictions of functions in Vh,G to Ω2(t). For t ∈ [0, T], let +Vh(t) := {v : v|Ω1(t) = v0|Ω1(t), for some v0 ∈ Vh,0 and +v|Ω2(t) = vG|Ω2(t), for some vG ∈ Vh,G}. +(3.9) +7 + +See Figure 3 for an illustration of a function v ∈ Vh(t). From the dG(0) movement of TG, +we have via (3.6a) that Vh(t) is the same space for all t ∈ In. We thus write +Vh,n = Vh(tn) = Vh(t), +∀t ∈ In. +(3.10) +Now we define the space Vh(In) as the space of functions that lie in Vh,n, for all t ∈ In. For +n = 1, . . . , N, let +Vh(In) := {v : v(·, t) ∈ Vh,n, ∀t ∈ In}. +(3.11) +With a general and somewhat relaxed notation, any v ∈ Vh(In) can be represented as +v(x, t) = +� +j +Vj(t)ϕj(x), +(3.12) +where the ϕj’s belong to both {ϕ0,j}j and {ϕG,j}j, and the only restriction on the coeffi- +cients Vj is that Vj(t) ∈ R for all t ∈ (0, T]. +x +v(x, t) +0 +Figure 3: An example of v(x, t) versus x for d = 1, where v(·, t) ∈ Vh(t), p = 1, and time +t ∈ (0, T]. The nodes of the blue background mesh T0 are marked with circles and the +nodes of the red moving overlapping mesh TG with crosses. +3.2.2 +The fully discrete spaces V n +h and Vh +Now we consider a subspace of Vh(In), which consists of functions whose coefficients have +a polynomial time dependence of degree q or lower. Analogously with the procedure of +defining Vh(t), we first define two auxiliary finite element spaces. For n = 1, . . . , N, let V n +h,0 +and V n +h,G be the spaces of continuous piecewise polynomials of degree ≤ p on T0 and TG +for all t ∈ In, respectively, and polynomials of degree ≤ q in time along the trajectories of +T0 and TG for t ∈ In, respectively. We also let the functions in V n +h,0 be zero on ∂Ω0 for all +8 + +t ∈ In. For n = 1, . . . , N, let +V n +h,0 := +� +v : v(x, t) = +� +j +Vj(t)ϕ0,j(x), Vj ∈ Pq(In), ∀j +� +, +(3.13) +V n +h,G := +� +v : v(x, t) = +� +j +Vj(t)ϕG,j(x), Vj ∈ Pq(In), ∀j +� +, +(3.14) +where Pq(In) is the space of polynomials of degree ≤ q on In; see Figure 4. We now use +these two spaces to define the broken finite element space V n +h as the space of functions that +on S1,n are restrictions of functions in V n +h,0 to S1,n, and on S2,n are restrictions of functions +in V n +h,G to S2,n. For n = 1, . . . , N, let +V n +h := {v : v|S1,n = vn +0 |S1,n, for some vn +0 ∈ V n +h,0, and +v|S2,n = vn +G|S2,n, for some vn +G ∈ V n +h,G}. +(3.15) +Finally, we define the finite element space Vh as the space of functions that lie in V n +h for +n = 1, . . . , N: +Vh := {v : v|S0,n ∈ V n +h , n = 1, . . . , N}. +(3.16) +Vj(t) +t +tk−1 +tk−2 +tk +tk+1 +Vj(t) +t +tk−1 +tk−2 +tk +tk+1 +Figure 4: Examples of Vj(t) versus t on three subsequent time subintervals In = (tn−1, tn], +for Vj ∈ Pq(In). Left: q = 0, i.e., Vj is constant on each In. 
Right: q = 1, i.e., Vj is at +most linear on each In. +9 + +3.3 +Finite element formulation +We may now formulate the space-time cut finite element formulation for the problem +described in Section 2 as follows: Find uh ∈ Vh such that +N +� +n=1 +� � +In +( ˙uh, v)Ω0 dt + ([uh]n−1, v+ +n−1)Ω0 +� ++ +2 +� +i=1 +N +� +n=1 +� +In +(∇uh, ∇v)Ωi,n dt ++ +N +� +n=1 +� � +¯Γn +−⟨∂nuh⟩[v] − ⟨∂nv⟩[uh] + γh−1 +K [uh][v] d¯s +� ++ +N +� +n=1 +� +In +([∇uh], [∇v])ΩO,n dt += +� T +0 +(f, v)Ω0 dt +(3.17) +for all v ∈ Vh, where Vh is defined by (3.16), In = (tn−1, tn], (·, ·)Ω is the L2(Ω)-inner +product, [v]n is the jump in v at time tn, i.e., [v]n = v+ +n − v− +n , v± +n = limε→0+ v(x, tn ± ε), +Ωi,n = Ωi(tn), ¯Γn = Γn × In, ⟨v⟩ is a convex weighted average of v on Γ, i.e., ⟨v⟩ = +ω1v1 + ω2v2, where ω1, ω2 ∈ [0, 1] and ω1 + ω2 = 1, vi = limε→0+ v(¯s − εni), ¯s = (s, t), +n is the normal vector to Γ (it should not be confused with time index n, e.g., in tn), +∂nv = n · ∇v, [v] is the jump in v over Γ, i.e., [v] = v1 − v2, γ ≥ 0 is a stabilization +parameter, hK = hK(x) = hK0 for x ∈ K0, where hK0 is the diameter of simplex K0 ∈ T0, +and ΩO,n is defined by (3.6c). +4 +Analytic preliminaries +4.1 +The bilinear form Ah,t +For t ∈ [0, T] and k ∈ N we define the broken Sobolev spaces +Hk(Ω1(t), Ω2(t)) := Hk(∪iΩi(t)) := {v ∈ L2(Ω0) : v|Ωi(t) ∈ Hk(Ωi(t)), i = 1, 2}, +Hk +0 (Ω1(t), Ω2(t)) := Hk +0 (∪iΩi(t)) := {v ∈ Hk(Ω1(t), Ω2(t)) : v|∂Ω0 = 0}, +(4.1) +where Hk denotes the Sobolev space W k,2. We define the symmetric bilinear form Ah,t on +H1(∪iΩi(t)) by +Ah,t(w, v) := +2 +� +i=1 +(∇w, ∇v)Ωi(t) − (⟨∂nw⟩, [v])Γ(t) − (⟨∂nv⟩, [w])Γ(t) ++ (γh−1 +K [w], [v])Γ(t) + ([∇w], [∇v])ΩO(t), +(4.2) +10 + +where (w, v)Γ(t) is the L2(Γ(t))-inner product. From the dG(0) movement of TG, we have +via (3.6) that Ah,t is the same bilinear form for all t ∈ In. We thus write +An = Ah,tn = Ah,t, +∀t ∈ In. +(4.3) +Note that we have +� +¯Γn +wv d¯s = +� +In +(w, v)Γ(t) dt. +(4.4) +Using (4.4) and the bilinear form Ah,t, we may write the finite element variational formu- +lation (3.17) as: Find uh ∈ Vh such that +N +� +n=1 +� � +In +( ˙uh, v)Ω0 dt + ([uh]n−1, v+ +n−1)Ω0 +� ++ +N +� +n=1 +� +In +An(uh, v) dt = +� T +0 +(f, v)Ω0 dt, +(4.5) +for all v ∈ Vh. +Recall that T0,Γ(t) is the set of all simplices in T0 that are cut by Γ(t) and let ΓK(t) := +K ∩ Γ(t). We define the following two mesh-dependent norms: +∥w∥2 +1/2,h,Γ(t) := +� +K∈T0,Γ(t) +h−1 +K ∥w∥2 +ΓK(t), +(4.6) +∥w∥2 +−1/2,h,Γ(t) := +� +K∈T0,Γ(t) +hK∥w∥2 +ΓK(t). +(4.7) +Note that +∥w∥2 +Γ(t) = +� +K∈T0,Γ(t) +hKh−1 +K ∥w∥2 +ΓK(t) ≤ h +� +K∈T0,Γ(t) +h−1 +K ∥w∥2 +ΓK(t) = h∥w∥2 +1/2,h,Γ(t), +(4.8) +and +(w, v)Γ(t) = +� +Γ(t) +(h1/2 +K w)(h−1/2 +K +v) ds ≤ +� � +Γ(t) +hKw2 ds +�1/2� � +Γ(t) +h−1 +K v2 ds +�1/2 += +� +� +K∈T0,Γ(t) +hK +� +ΓK(t) +w2 ds +�1/2� +� +K∈T0,Γ(t) +h−1 +K +� +ΓK(t) +v2 ds +�1/2 += ∥w∥−1/2,h,Γ(t)∥v∥1/2,h,Γ(t), +(4.9) +where h = maxKl∈T0∪TG(hKl). With the two mesh-dependent norms, we define the time- +dependent spatial energy norm |||·|||Ah,t on H1 +0(Ω1(t), Ω2(t)) by +|||w|||2 +Ah,t := +2 +� +i=1 +∥∇w∥2 +Ωi(t) + ∥⟨∂nw⟩∥2 +−1/2,h,Γ(t) + ∥[w]∥2 +1/2,h,Γ(t) + ∥[∇w]∥2 +ΩO(t). +(4.10) +Note that boundedness of Ah,t on +� +H1 +0(Ω1(t), Ω2(t)), |||·|||Ah,t +� +follows trivially from using +(4.9) and (4.8) in (4.2). We are now ready to prove coercivity of Ah,t on Vh(t) with respect +to |||·|||Ah,t. +11 + +Lemma 4.1 (Discrete coercivity of Ah,t). Let the bilinear form Ah,t and the energy norm +|||·|||Ah,t be defined by (4.2) and (4.10), respectively. 
Then, for t ∈ [0, T] and γ sufficiently +large, there exists a constant αt > 0 such that +Ah,t(v, v) ≥ αt |||v|||2 +Ah,t , +∀v ∈ Vh(t). +(4.11) +Proof. Following the proof of the coercivity in [2], we start by inserting v ∈ Vh(t) into Ah,t: +Ah,t(v, v) = +2 +� +i=1 +(∇v, ∇v)Ωi(t) − (⟨∂nv⟩, [v])Γ(t) − (⟨∂nv⟩, [v])Γ(t) ++ (γh−1 +K [v], [v])Γ(t) + ([∇v], [∇v])ΩO(t) += +2 +� +i=1 +∥∇v∥2 +Ωi(t) − 2(⟨∂nv⟩, [v])Γ(t) + γ∥[v]∥2 +1/2,h,Γ(t) + ∥[∇v]∥2 +ΩO(t). +(4.12) +The second term in the last row of (4.12) with opposite sign is +2(⟨∂nv⟩, [v])Γ(t) ≤ 2∥⟨∂nv⟩∥−1/2,h,Γ(t)∥[v]∥1/2,h,Γ(t) +≤ 1 +ε∥⟨∂nv⟩∥2 +−1/2,h,Γ(t) + ε∥[v]∥2 +1/2,h,Γ(t) += 2 +ε∥⟨∂nv⟩∥2 +−1/2,h,Γ(t) − 1 +ε∥⟨∂nv⟩∥2 +−1/2,h,Γ(t) + ε∥[v]∥2 +1/2,h,Γ(t) +≤ 2 +εCI +� +2 +� +i=1 +∥∇v∥2 +Ωi(t) + ∥[∇v]∥2 +ΩO(t) +� +− 1 +ε∥⟨∂nv⟩∥2 +−1/2,h,Γ(t) + ε∥[v]∥2 +1/2,h,Γ(t), +(4.13) +where ε > 0 is to be chosen and CI > 0. We have used (4.9) to obtain the first inequality. To +obtain the last inequality, we have used the inverse inequality from Lemma A.4. Inserting +(4.13) into (4.12) gives +Ah,t(v, v) ≥ +2 +� +i=1 +∥∇v∥2 +Ωi(t) − 2 +εCI +� +2 +� +i=1 +∥∇v∥2 +Ωi(t) + ∥[∇v]∥2 +ΩO(t) +� ++ 1 +ε∥⟨∂nv⟩∥2 +−1/2,h,Γ(t) − ε∥[v]∥2 +1/2,h,Γ(t) ++ γ∥[v]∥2 +1/2,h,Γ(t) + ∥[∇v]∥2 +ΩO(t) += +� +1 − 2CI +ε +� +2 +� +i=1 +∥∇v∥2 +Ωi(t) + 1 +ε∥⟨∂nv⟩∥2 +−1/2,h,Γ(t) ++ (γ − ε)∥[v]∥2 +1/2,h,Γ(t) + +� +1 − 2CI +ε +� +∥[∇v]∥2 +ΩO(t). +(4.14) +By taking ε > 2CI, e.g., ε = 4CI, and γ > ε we may obtain (4.11) from (4.14). +12 + +4.1.1 +Standard operators that map to Vh(t) +Here we define some standard spatial operators for every t ∈ [0, T]. The L2(Ω0)-projection +operator Ph,t : L2(Ω0) → Vh(t) is defined by +(Ph,tw, v)Ω0 = (w, v)Ω0, +∀v ∈ Vh(t). +(4.15) +The Ritz projection operator Rh,t : H1(∪iΩi(t)) → Vh(t) is defined by +Ah,t(Rh,tw, v) = Ah,t(w, v), +∀v ∈ Vh(t). +(4.16) +Lemma 4.2 (Estimates for the Ritz projection error). Let the spatial energy norm |||·|||Ah,t +and the Ritz projection operator Rh,t be defined by (4.10) and (4.16), respectively. Then +there exist constants C1, C2 > 0 such that for any w ∈ Hp+1(Ω0) ∩ H1 +0(Ω0) we have that +|||w − Rh,tw|||Ah,t ≤ C1hp∥Dp+1 +x +w∥Ω0, +(4.17) +∥w − Rh,tw∥Ω0 ≤ C2hp+1∥Dp+1 +x +w∥Ω0. +(4.18) +Proof. The proof is basically the same as in the standard case but with natural modifica- +tions to account for the CutFEM setting. First we will show the energy estimate (4.17), +then we will use it together with the Aubin-Nitsche duality trick to show (4.18). +Let +δ = w − Rh,tw denote the projection error. +We start by splitting the error using the +interpolant Ih,tw ∈ Vh(t), where Ih,t is the spatial interpolation operator defined by (B.1), +δ = w − Rh,tw ±Ih,tw +� �� � +=0 += w − Ih,tw +� +�� +� +=π ++ Ih,tw − Rh,tw +� +�� +� +=η += π + η. +(4.19) +We then consider +|||δ|||Ah,t = |||π + η|||Ah,t ≤ |||π|||Ah,t + |||η|||Ah,t , +(4.20) +where we focus on the η-part first. We note that η ∈ Vh(t) and use Lemma 4.1, i.e., the +discrete coercivity of Ah,t, to get +|||η|||2 +Ah,t ≤ 1 +αt +Ah,t(η, η) = CAh,t(Ih,tw − Rh,tw ± w, η) += CAh,t(Ih,tw − w +� +�� +� +=−π +, η) + C Ah,t(w − Rh,tw, η) +� +�� +� +=0 from (4.16) += −CAh,t(π, η) +≤ C |||π|||Ah,t |||η|||Ah,t , +=⇒ +|||η|||Ah,t ≤ C |||π|||Ah,t . +(4.21) +Using (4.21) in (4.20), we get +|||w − Rh,tw|||Ah,t = |||δ|||Ah,t ≤ C |||π|||Ah,t = C |||w − Ih,tw|||Ah,t +(B.2) +≤ Chp∥Dp+1 +x +w∥Ω0, +(4.22) +13 + +which is (4.17). We consider the auxiliary problem: Find φ ∈ H2(Ω0) ∩ H1 +0(Ω0) such that +− ∆φ = δ +in Ω0. +(4.23) +We note that ∇φ ∈ H1(Ω0) from φ ∈ H2(Ω0), which means that ∇φ|Γ(t) ∈ L2(Γ(t)). 
Thus +[∂nφ]|Γ(t) = 0 in L2(Γ(t)). We denote by Ih,p=1,t the spatial interpolation operator Ih,t for +p = 1, and note that Ih,p=1,tφ ∈ Vh(t). The square of the left-hand side of (4.18) is +∥w − Rh,tw∥2 +Ω0 = (δ, δ)Ω0 +(4.23) += +(−∆φ, δ)Ω0 +(A.12) += +Ah,t(φ, δ) = Ah,t(φ, w − Rh,tw) +5th += Ah,t(φ − Ih,p=1,tφ, w − Rh,tw) +≤ C |||φ − Ih,p=1,tφ|||Ah,t |||w − Rh,tw|||Ah,t +(B.2),(4.17) +≤ +C +� +Ch∥D2 +xφ∥Ω0 +�� +Chp∥Dp+1 +x +w∥Ω0 +� +8th +≤ Chp+1∥∆φ∥Ω0∥Dp+1 +x +w∥Ω0 = Chp+1∥w − Rh,tw∥Ω0∥D2 +xw∥Ω0, +(4.24) +where, in the fifth step, we have used that w − Rh,tw is Ah,t-orthogonal to Vh(t), which +follows from the definition of Rh,t. In the eighth step, we have used elliptic regularity on +H2(Ω0) ∩ H1 +0(Ω0) for φ. Dividing both sides of (4.24) by a factor ∥w − Rh,tw∥Ω0 gives +(4.18). +Note that Lemma 4.2 provides estimates for the approximation error for the elliptic prob- +lems corresponding to (2.5) and (3.17). This is so since the discrete elliptic solution is ex- +actly the Ritz projection of the continuous one. The discrete Laplacian ∆h,t : H1(∪iΩi(t)) → +Vh(t) is defined by +(−∆h,tw, v)Ω0 = Ah,t(w, v), +∀v ∈ Vh(t). +(4.25) +From the dG(0) movement of TG, we have via (3.10) and (4.3) that +Pn = Ph,tn = Ph,t, +∀t ∈ In, +(4.26) +Rn = Rh,tn = Rh,t, +∀t ∈ In, +(4.27) +∆n = ∆h,tn = ∆h,t, +∀t ∈ In. +(4.28) +4.1.2 +Shift operator +Here, we introduce a shift operator not present in the standard Eriksson and Johnson +analysis, presented in [12, 13], that is needed because of the dG(0) mesh movement in the +CutFEM setting. The shift operator will be used in the proof of Lemma 5.2, i.e., the strong +stability estimate. At one point in the proof, one would like to consider Rnu− +h,n−1. This +is however undefined in the current setting because of the shifting discontinuity coming +from the movement of Γ. Since Rn is only defined for functions in H1(∪iΩi,n) and u− +h,n−1 ∈ +Vh,n−1 ⊂ H1(∪iΩi,n−1), we cannot talk about Rnu− +h,n−1. Enter shift operator. The idea is +to consider a Ritzlike projection from one discrete space to another. +14 + +To define the shift operator, we will use a special bilinear form An−1,n. To define An−1,n, +we will use a partition of Ω0 into the subdomains +ωij = ωi,j,n−1 := Ωi,n−1 ∩ Ωj,n, +for i, j = 1, 2. +(4.29) +For n = 1, . . . , N, we define the non-symmetric bilinear form An−1,n on H1(∪ijωij) by +An−1,n(v, w) := +2 +� +i,j=1 +(∇v, ∇w)ωij − (⟨∂nv⟩, [w])Γn − ([v], ⟨∂nw⟩)Γn−1. +(4.30) +Using the energy norm |||·|||Ah,t defined by (4.10) together with (4.3), we define two related +energy norms, one on H1 +0(∪iΩi,n−1) and the other on H1 +0(∪iΩi,n), by +|||v|||2 +An−1 := |||v|||2 +An−1 + ∥⟨∂nv⟩∥2 +−1/2,h,Γn, +(4.31) +|||w|||2 +An := |||w|||2 +An + ∥⟨∂nw⟩∥2 +−1/2,h,Γn−1. +(4.32) +With these two norms, we may obtain a continuity result for An−1,n, which we present as +the following lemma: +Lemma 4.3 (Continuity of An−1,n). Let the bilinear form An−1,n be defined by (4.30), and +the two norms |||·|||An−1 and |||·|||An by (4.31) and (4.32), respectively. Then there exists a +constant C > 0 such that +An−1,n(v, w) ≤ C |||v|||An−1 |||w|||An , +∀v ∈ H1(∪iΩi,n−1) and ∀w ∈ H1(∪iΩi,n). +(4.33) +Proof. The proof is straightforward. The left-hand side of (4.33) is +An−1,n(v, w) = +2 +� +i,j=1 +(∇v, ∇w)ωij +� +�� +� +=I +− (⟨∂nv⟩, [w])Γn +� +�� +� +=II +− ([v], ⟨∂nw⟩)Γn−1 +� +�� +� +=III +. 
+(4.34) +We treat the terms separately, starting with the first: +I = +2 +� +i,j=1 +(∇v, ∇w)ωij ≤ +� +2 +� +i,j=1 +∥∇v∥2 +ωij +�1/2� +2 +� +i,j=1 +∥∇w∥2 +ωij +�1/2 += +� +2 +� +i=1 +∥∇v∥2 +Ωi,n−1 +�1/2� +2 +� +i=1 +∥∇w∥2 +Ωi,n +�1/2 +≤ |||v|||An−1 |||w|||An ≤ |||v|||An−1 |||w|||An , +(4.35) +where we have used that v ∈ H1(∪iΩi,n−1) and w ∈ H1(∪iΩi,n) to merge integrals over +ωij’s to integrals over Ωi’s, e.g., ∥∇v∥2 +ωi1 + ∥∇v∥2 +ωi2 = ∥∇v∥2 +Ωi,n−1. The second term is +II = (⟨∂nv⟩, [w])Γn +(4.9) +≤ ∥⟨∂nv⟩∥−1/2,h,Γn∥[w]∥1/2,h,Γn ≤ |||v|||An−1 |||w|||An . +(4.36) +15 + +The third term is treated in the same way, thus +III = ([v], ⟨∂nw⟩)Γn−1 +(4.9) +≤ ∥[v]∥1/2,h,Γn−1∥⟨∂nw⟩∥−1/2,h,Γn−1 ≤ |||v|||An−1 |||w|||An . +(4.37) +Collecting the estimates for the three terms gives the continuity result (4.33). +By restricting v and w in Lemma 4.3 to the corresponding discrete subspaces, i.e., Vh,n−1 +and Vh,n, respectively, we may obtain a continuity result in the weaker An-norms. This is +done by estimating the average terms in the An-norms using an inverse inequality that is +a twist on the one from Lemma A.4. The treatment is analogous for both average terms +in the An-norms so we only consider the one in (4.31). This term is +∥⟨∂nv⟩∥2 +−1/2,h,Γn = ∥⟨∂nv⟩∥2 +−1/2,h,Γn∩Γn−1 + ∥⟨∂nv⟩∥2 +−1/2,h,Γn\Γn−1 +≤ ∥⟨∂nv⟩∥2 +−1/2,h,Γn−1 + ∥⟨∂nv⟩∥2 +−1/2,h,Γn\Γn−1 +≤ C |||v|||2 +An−1 + ∥⟨∂nv⟩∥2 +−1/2,h,Γn\Γn−1, +(4.38) +where we also want to estimate the second term by |||v|||2 +An−1. We do this by following the +proof of Lemma A.4, omitting some of the steps that are the same. Partitioning Γn \ Γn−1 +into `Γi := (Γn \ Γn−1) ∩ Ωi,n−1, using the interdependent indices i and j, and writing +`ΓiKj = Kj ∩ `Γi, we have for v ∈ Vh,n−1 that +∥⟨∂nv⟩∥2 +−1/2,h,Γn\Γn−1 = +2 +� +i=1 +∥⟨∂nv⟩∥2 +−1/2,h,`Γi = +2 +� +i=1 +� +K0∈T0,`Γi +hK0∥⟨∂nv⟩∥2 +`ΓiK0 +(A.22) +≤ +C +2 +� +i=1 +� +Kj∈Tj,`Γi +hKj∥⟨∂nv⟩∥2 +`ΓiKj = C +� +`ΓiKj +hKj∥⟨∂nv⟩∥2 +`ΓiKj +≤ C +� +`ΓiKj +hKj +� +∥(∇v)+∥2 +`ΓiKj + ∥(∇v)−∥2 +`ΓiKj +� +(A.20) +≤ +C +� +`ΓiKj +hKj +� +Ch−1 +K+ +j ∥∇v∥2 +K+ +j + Ch−1 +K− +j ∥∇v∥2 +K− +j +� +≤ C +� +`ΓiKj +� +∥∇v∥2 +K+ +j + ∥∇v∥2 +K− +j +� +≤ C +� +∥∇v∥2 +Ω1,n−1 + ∥(∇v)1∥2 +ΩO,n−1 + ∥∇v∥2 +Ω2,n−1 +� +(A.25) +≤ +C +� +2 +� +i=1 +∥∇v∥2 +Ωi,n−1 + ∥[∇v]∥2 +ΩO,n−1 +� +≤ C |||v|||2 +An−1 . +(4.39) +16 + +By plugging (4.39) into (4.38), we get for v ∈ Vh,n−1 that +∥⟨∂nv⟩∥−1/2,h,Γn ≤ C |||v|||An−1 , +(4.40) +and the analogous results for w ∈ Vh,n +∥⟨∂nw⟩∥−1/2,h,Γn−1 ≤ C |||w|||An . +(4.41) +Using (4.40) and (4.41) in (4.31) and (4.32), respectively, we may obtain the discrete norm +equivalences +|||v|||An−1 ≤ |||v|||An−1 ≤ C |||v|||An−1 , +∀v ∈ Vh,n−1, +(4.42) +|||w|||An ≤ |||w|||An ≤ C |||w|||An , +∀w ∈ Vh,n. +(4.43) +By restricting the functions in Lemma 4.3 to the discrete subspaces, we may use the above +norm equivalences to obtain a discrete continuity result which we present as the following +corollary: +Corollary 4.1 (Discrete continuity of An−1,n). Let the bilinear form An−1,n and the spatial +energy norm |||·|||An be defined by (4.30) and (4.10), respectively. +Then there exists a +constant C > 0 such that +An−1,n(v, w) ≤ C |||v|||An−1 |||w|||An , +∀v ∈ Vh,n−1 and ∀w ∈ Vh,n. +(4.44) +We are now ready to move on to the shift operator. +Definition 4.1 (Shift operators). For every time tn−1, where n = 1, . . . , N, we define the +two shift operators S + +n−1 : Vh,n−1 → Vh,n and S − +n−1 : Vh,n → Vh,n−1 by +An(S + +n−1v, w) = An−1,n(v, w), +∀w ∈ Vh,n, +(4.45) +An−1(v, S − +n−1w) = An−1,n(v, w), +∀v ∈ Vh,n−1. 
+(4.46) +The forward in time shift operator S + +n−1 is the main one that is used in the analysis, and +we write Sn = S + +n−1 for brevity. For all the results we present and prove for Sn, there are +corresponding ones for S − +n−1 that are proven in an analogous way. For v ∈ Vh,n−1, using +Snv ∈ Vh,n, the discrete coercivity of An, the definition of Sn, and the discrete continuity +of An−1,n, we get that +α |||Snv|||2 +An +(4.11) +≤ An(Snv, Snv) +(4.45) += +An−1,n(v, Snv) +(4.44) +≤ C |||v|||An−1 |||Snv|||An . +(4.47) +From (4.47) we obtain the following stability of the shift operator: +|||Snv|||An ≤ C |||v|||An−1 , +∀v ∈ Vh,n−1. +(4.48) +The shift operator has two approximability properties that are essential for its application +in the analysis. We present and prove these properties in the following two lemmas: +17 + +Lemma 4.4 (An estimate for the shift error). Let the shift operator Sn = S + +n−1 and the +spatial energy norm |||·|||An−1 be defined by (4.45) and (4.10), respectively. Then there exists +a constant C > 0 such that +∥v − Snv∥Ω0 ≤ Ch |||v|||An−1 , +∀v ∈ Vh,n−1. +(4.49) +Proof. The proof is based on the proof of Lemma 4.2, i.e., estimates for the Ritz projection +error but involves a few modifications. Let δ = v−Snv denote the shift error. We consider +the auxiliary problem: Find φ ∈ H2(Ω0) ∩ H1 +0(Ω0) such that +− ∆φ = δ +in Ω0. +(4.50) +We note that ∇φ ∈ H1(Ω0) from φ ∈ H2(Ω0), which means that ∇φ|Γn∪Γn−1 ∈ L2(Γn ∪ +Γn−1). Thus [∂nφ]|Γn∪Γn−1 = 0 in L2(Γn ∪ Γn−1). We denote by Ih,n = Ih,p=1,n the spatial +interpolation operator Ih,t, defined by (B.1), for p = 1 and t = tn, and note that Ih,nφ ∈ +Vh,n. The square of the left-hand side of (4.49) is +∥v − Snv∥2 +Ω0 = (δ, δ)Ω0 +(4.50) += +(−∆φ, δ)Ω0 = (−∆φ, v)Ω0 − (−∆φ, Snv)Ω0 +(A.7),(A.12) += +2 +� +i=1 +(∇φ, ∇v)Ωi,n−1 − (⟨∂nφ⟩, [v])Γn−1 − An(φ, Snv) +5th += An−1,n(v, φ) − An(φ, Snv) ± An−1,n(v, Ih,nφ) +(4.45) += +An−1,n(v, φ − Ih,nφ) − An(Snv, φ − Ih,nφ) +(4.33),(4.42) +≤ +C |||v|||An−1 |||φ − Ih,nφ|||An + C |||Snv|||An |||φ − Ih,nφ|||An +(B.9),(B.2) +≤ +C |||v|||An−1 +� +Ch∥D2 +xφ∥Ω0 +� ++ C |||Snv|||An +� +Ch∥D2 +xφ∥Ω0 +� +9th +≤ Ch |||v|||An−1 ∥∆φ∥Ω0 + Ch |||v|||An−1 ∥∆φ∥Ω0 +(4.50) += +Ch |||v|||An−1 ∥δ∥Ω0, +(4.51) +where we have used that [φ]|Γn = 0 to go to An−1,n in the fifth step. In the ninth step, we +have used elliptic regularity on H2(Ω0) ∩ H1 +0(Ω0) for φ, and the stability of Sn given by +(4.48). Dividing both sides of (4.51) by a factor ∥δ∥Ω0 = ∥v − Snv∥Ω0 gives (4.49). +Lemma 4.5 (An estimate for the shift energy). Let the bilinear form An be defined by +(4.2), the shift operator Sn = S + +n−1 by (4.45), the discrete Laplacian ∆n by (4.25), and +the spatial energy norm |||·|||An by (4.10). Then there exists a constant C > 0 such that +An−1(v, v) − An(Snv, Snv) ≤ Ch |||v|||An−1 ∥∆n−1v∥Ω0, +∀v ∈ Vh,n−1. +(4.52) +18 + +Proof. The left-hand side of (4.52) is +An−1(v, v) − An(Snv, Snv) +(4.45) += +An−1(v, v) − An−1,n(v, Snv) +(4.46) += +An−1(v, v) − An−1(v, S − +n−1Snv) += An−1(v, v − S − +n−1Snv +� +�� +� +∈Vh,n−1 +) +(4.25) += +(−∆n−1v, v − S − +n−1Snv)Ω0 +≤ ∥∆n−1v∥Ω0∥v − S − +n−1Snv ± Snv∥Ω0 +≤ ∥∆n−1v∥Ω0 +� +∥v − Snv∥Ω0 + ∥Snv − S − +n−1Snv∥Ω0 +� +7th +≤ ∥∆n−1v∥Ω0 +� +Ch |||v|||An−1 + Ch |||Snv|||An +� +(4.48) +≤ ∥∆n−1v∥Ω0 +� +Ch |||v|||An−1 + Ch |||v|||An−1 +� += Ch∥∆n−1v∥Ω0 |||v|||An−1 , +(4.53) +where, in the seventh step, we have used the estimate for the shift error given by (4.49) +for Sn and a corresponding result for S − +n−1. 
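Before moving on, a practical remark. In an implementation, applying the shift operator of Definition 4.1 amounts to one linear solve per slab interface once the bilinear forms have been assembled as matrices. The sketch below is our own illustration, not code from the paper; it assumes the conventions that A_n is the sparse matrix of An in the basis of Vh,n (symmetric and, by Lemma 4.1, positive definite for γ sufficiently large) and that A_nm1_n is the rectangular matrix of An−1,n with columns indexed by the basis of Vh,n−1 and rows by the basis of Vh,n.

from scipy.sparse.linalg import spsolve

def apply_shift_operator(A_n, A_nm1_n, v_coeffs):
    # Coefficients of S_n v in the basis of V_{h,n}, obtained from (4.45):
    # A_n(S_n v, w) = A_{n-1,n}(v, w) for every basis function w of V_{h,n},
    # which in matrix form is the linear system  A_n x = A_{n-1,n} v.
    rhs = A_nm1_n @ v_coeffs
    return spsolve(A_n.tocsc(), rhs)

Under the same conventions, the backward shift of (4.46) is obtained analogously by solving with the matrix of An−1 and the transpose of A_nm1_n on the right-hand side.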
+By switching the order of the terms on the left-hand side of (4.52), and following the same +steps as in the proof of Lemma 4.5, we may obtain the same estimate. Moving the term +without the shift operator to the right-hand side gives us a result that we present as the +following corollary: +Corollary 4.2 (A stability result for the shift operator). Let the bilinear form An be +defined by (4.2), the shift operator Sn = S + +n−1 by (4.45), the discrete Laplacian ∆n by +(4.25), and the spatial energy norm |||·|||An by (4.10). Then there exists a constant C > 0 +such that +An(Snv, Snv) ≤ An−1(v, v) + Ch |||v|||An−1 ∥∆n−1v∥Ω0, +∀v ∈ Vh,n−1. +(4.54) +4.2 +The bilinear form Bh +For k ∈ N we define the broken Bochner-Sobolev spaces +Hk({In}N +n=1; L2(Ω0)) := {v ∈ L2((0, T]; L2(Ω0)) : +v|S0,n ∈ Hk(In; L2(Ω0)) for n = 1, . . . , N}. +(4.55) +19 + +We define the non-symmetric bilinear form Bh on H1({In}N +n=1; L2(Ω0))∩L2((0, T]; H1(∪iΩi(t))) +by +Bh(w, v) := +N +� +n=1 +� +In +( ˙w, v)Ω0 dt + +N +� +n=1 +� +In +Ah,t(w, v) dt ++ +N−1 +� +n=1 +([w]n, v+ +n )Ω0 + (w+ +0 , v+ +0 )Ω0. +(4.56) +We may then write (3.17) in compact form as: Find uh ∈ Vh such that +Bh(uh, v) = (u0, v+ +0 )Ω0 + +� T +0 +(f, v)Ω0 dt, +(4.57) +for all v ∈ Vh. By partially integrating the first term in (4.56), the bilinear form Bh can +be expressed differently, as noted in the following lemma: +Lemma 4.6 (Temporal partial integration in Bh). The bilinear form Bh, defined in (4.56), +can be written as +Bh(w, v) = +N +� +n=1 +� +In +(w, −˙v)Ω0 dt + +N +� +n=1 +� +In +Ah,t(w, v) dt ++ +N−1 +� +n=1 +(w− +n , −[v]n)Ω0 + (w− +N, v− +N)Ω0. +(4.58) +Proof. The first term in (4.56) is +N +� +n=1 +� +In +( ˙w, v)Ω0 dt = +N +� +n=1 +� +In +� +Ω0 +˙wv dx dt = +N +� +n=1 +� +Ω0 +� � +In +˙wv dt +� +dx += +N +� +n=1 +� +Ω0 +�� +wv +� +In − +� +In +w ˙v dt +� +dx += +N +� +n=1 +� +Ω0 +w− +n v− +n − w+ +n−1v+ +n−1 dx + +N +� +n=1 +� +Ω0 +� +In +−w ˙v dt dx += +N +� +n=1 +� +(w− +n , v− +n )Ω0 − (w+ +n−1, v+ +n−1)Ω0 +� ++ +N +� +n=1 +� +In +(w, −˙v)Ω0 dt +(4.59) +where the second term in the last row is as we want it. We combine the first term in the +20 + +last row of (4.59) with the third and fourth terms in (4.56) to yield +N +� +n=1 +� +(w− +n , v− +n )Ω0 − (w+ +n−1, v+ +n−1)Ω0 +� ++ +N−1 +� +n=1 +([w]n, v+ +n )Ω0 + (w+ +0 , v+ +0 )Ω0 += +N−1 +� +n=1 +� +(w− +n , v− +n )Ω0 − (w+ +n−1, v+ +n−1)Ω0 + (w+ +n , v+ +n )Ω0 − (w− +n , v+ +n )Ω0 +� ++ (w− +N, v− +N)Ω0 − (w+ +N−1, v+ +N−1)Ω0 + (w+ +0 , v+ +0 )Ω0 += +N−1 +� +n=1 +� +(w− +n , v− +n )Ω0 − (w− +n , v+ +n )Ω0 +� ++ (w+ +0 , v+ +0 )Ω0 − (w+ +0 , v+ +0 )Ω0 +� +�� +� += 0 ++(w− +N, v− +N)Ω0 + (w+ +N−1, v+ +N−1)Ω0 − (w+ +N−1, v+ +N−1)Ω0 +� +�� +� += 0 += +N−1 +� +n=1 +(w− +n , −[v]n)Ω0 + (w− +N, v− +N)Ω0. +(4.60) +Using the identities (4.59) and (4.60) in (4.56) gives (4.58). +4.3 +Consistency and Galerkin orthogonality +To show Galerkin orthogonality for the bilinear form Bh, we need the following lemma on +consistency: +Lemma 4.7 (Consistency). The solution u to problem (2.5) also solves (3.17). +Proof. Insert u in place of uh in the expression on the left-hand side of (3.17). From the +regularity of u, we have, for n = 1, . . . , N, [u]n−1 = 0, [u] = 0 and [∇u] = 0. Writing +� +i,n = �2 +i=1 +�N +n=1, the left-hand side of (3.17) with u becomes +N +� +n=1 +� +In +( ˙u, v)Ω0 dt + +� +i,n +� +In +(∇u, ∇v)Ωi,n dt + +N +� +n=1 +� +¯Γn +−⟨∂nu⟩[v] d¯s. 
+(4.61) +The second term in (4.61) is +� +i,n +� +In +(∇u, ∇v)Ωi,n dt = +� +i,n +� +In +� � +Ωi,n +∇u · ∇v dx +� +dt += +� +i,n +� +In +� � +Ωi,n +−∆uv dx + +� +∂Ωi,n +(n · ∇uv)i ds +� +dt += +� +i,n +� +In +(−∆u, v)Ωi,n dt + +� +i,n +� +In +� +∂Ωi,n +(n · ∇uv)i ds dt, +(4.62) +21 + +where ni is the outward pointing normal vector to ∂Ωi,n. We leave the first term in the +last row of (4.62) as it is and consider the second term. +� +i,n +� +In +� +∂Ωi,n +(n · ∇uv)i ds dt += +� +i,n +� +In +� � +∂Ωi,n∩∂Ω0 +(n · ∇uv)i ds +� +�� +� +=0, since v=0 on ∂Ω0 ++ +� +Γn +(n · ∇uv)i ds +� +dt += +� +i,n +� +In +� +Γn +(n · ∇uv)i ds dt = +N +� +n=1 +2 +� +i=1 +� +¯Γn +(n · ∇uv)i d¯s += +N +� +n=1 +� +¯Γn +n1 · ∇u1v1 + n2 · ∇u2v2 d¯s +5th += +N +� +n=1 +� +¯Γn +n · [∇uv] d¯s = +N +� +n=1 +� +¯Γn +[(∂nu)v] d¯s +7th += +N +� +n=1 +� +¯Γn +[∂nu]⟨v⟩ + ⟨∂nu⟩[v] + (ω2 − ω1)[∂nu][v] d¯s += +N +� +n=1 +� +¯Γn +⟨∂nu⟩[v] d¯s, +(4.63) +where we have taken n = n1 = −n2 and [v] = v1 − v2 to obtain the fifth equality, applied +(A.1) to get the seventh equality and finally, to obtain the last equality, we have used +[∂nu] = 0, which follows from the regularity of u. Using the identities (4.62) and (4.63), +we have that (4.61), i.e., the left-hand side of (3.17) with u instead of uh, is +N +� +n=1 +� +In +( ˙u, v)Ω0 dt + +� +i,n +� +In +(∇u, ∇v)Ωi,n dt + +N +� +n=1 +� +¯Γn +−⟨∂nu⟩[v] d¯s += +N +� +n=1 +� +In +( ˙u, v)Ω0 dt + +� +i,n +� +In +(−∆u, v)Ωi,n dt + +N +� +n=1 +� +¯Γn +⟨∂nu⟩[v] − ⟨∂nu⟩[v] +� +�� +� +=0 +d¯s += +� +i,n +� +In +( ˙u − ∆u, v)Ωi,n dt = +� T +0 +( ˙u − ∆u, v)Ω0 dt +(2.5) += +� T +0 +(f, v)Ω0 dt, +(4.64) +which is the right-hand side of (3.17). This completes the proof. +From Lemma 4.7, we have that u solves (3.17). Since (4.57) is just another way of writing +(3.17), u solves (4.57) as well. From this we may obtain a Galerkin orthogonality which +we present as the following corollary: +22 + +Corollary 4.3 (Galerkin orthogonality). Let the bilinear form Bh be defined by (4.56), +and let u and uh be the solutions of (2.5) and (3.17), respectively. Then +Bh(u − uh, v) = 0, +∀v ∈ Vh. +(4.65) +4.4 +A discrete dual problem +We now consider the function zh ∈ Vh defined by +Bh(v, zh) = (v− +N, z+ +h,N)Ω0, +∀v ∈ Vh. +(4.66) +From (4.66), the function zh is the solution of a discrete dual problem to (2.5). With the +alternative way of expressing Bh from Lemma 4.6, we may write (4.66) as the following +discrete dual problem that goes backwards in time: Find zh ∈ Vh such that +N +� +n=1 +� +In +(v, − ˙zh)Ω0 dt + +N +� +n=1 +� +In +Ah,t(v, zh) dt ++ +N−1 +� +n=1 +(v− +n , −[zh]n)Ω0 + (v− +N, z− +h,N)Ω0 += (v− +N, z+ +h,N)Ω0, +(4.67) +for all v ∈ Vh. Thus, we may consider zh to be the finite element solution of the following +continuous dual problem: +� +� +� +� +� +− ˙z − ∆z = 0 +in Ω0 × [0, T), +z = 0 +on ∂Ω0 × [0, T), +z = z+ +h,N +in Ω0 × {T}. +(4.68) +5 +Stability analysis +The stability analysis in this section is based on a stability analysis for the case with only +a background mesh, presented by Eriksson and Johnson in [12, 13]. Due to the CutFEM +setting, the original analysis has been slightly modified by the incorporation of the shift +operator defined by (4.45). The main result of this section is the following stability estimate +and its counterpart for the discrete dual problem: +Theorem 5.1 (The main stability estimate). Let uh be the solution of (3.17) with f ≡ 0 +and let u0 be the initial value of the analytic solution of the problem presented in Section +2. 
Then we have that +∥u− +h,N∥Ω0 + +N +� +n=1 +� +In +∥ ˙uh∥Ω0 + ∥∆nuh∥Ω0 dt + +N +� +n=1 +∥[uh]n−1∥Ω0 ≤ C1∥u0∥Ω0, +(5.1) +where C1 = C(log(tN/k1) + 1)1/2 and C > 0 is a constant. +23 + +The counterpart of (5.1) for zh is a crucial tool in the proof of the a priori error estimate +presented in Theorem 6.1 in Section 6. For the purpose of that application, we replace the +initial time jump term. From (5.1), we have that ∥[uh]0∥Ω0 ≤ C1∥u0∥Ω0. The corresponding +inequality for zh is ∥[zh]N∥Ω0 ≤ CN∥z+ +h,N∥Ω0, where CN = C(log(tN/kN)+1)1/2 and C > 0. +Squaring both sides in this inequality gives us +C2 +N∥z+ +h,N∥2 +Ω0 ≥ ∥[zh]N∥2 +Ω0 = ([zh]N, [zh]N)Ω0 = (z+ +h,N − z− +h,N, z+ +h,N − z− +h,N)Ω0 +≥ ∥z+ +h,N∥2 +Ω0 +� +�� +� +≥0 +−2(z+ +h,N, z− +h,N)Ω0 + ∥z− +h,N∥2 +Ω0 ≥ ∥z− +h,N∥2 +Ω0 − 2(z+ +h,N, z− +h,N)Ω0, +(5.2) +from which we get +∥z− +h,N∥2 +Ω0 ≤ C2 +N∥z+ +h,N∥2 +Ω0 + 2(z+ +h,N, z− +h,N)Ω0 ≤ C2 +N∥z+ +h,N∥2 +Ω0 + 2∥z+ +h,N∥Ω0∥z− +h,N∥Ω0 +≤ C2 +N∥z+ +h,N∥2 +Ω0 + 2∥z+ +h,N∥2 +Ω0 + 1 +2∥z− +h,N∥2 +Ω0, +=⇒ +∥z− +h,N∥Ω0 ≤ CN∥z+ +h,N∥Ω0. +(5.3) +We use this results in the corresponding stability estimate for zh, which we present as the +following corollary: +Corollary 5.1 (A stability estimate for zh). A corresponding stability estimate to (5.1) +for the finite element solution zh to the discrete dual problem (4.67) is +∥z+ +h,0∥Ω0 + +N +� +n=1 +� +In +∥ ˙zh∥Ω0 + ∥∆nzh∥Ω0 dt + +N−1 +� +n=1 +∥[zh]n∥Ω0 + ∥z− +h,N∥Ω0 ≤ CN∥z+ +h,N∥Ω0, (5.4) +where CN = C(log(tN/kN) + 1)1/2 and C > 0 is a constant. +To prove Theorem 5.1 and thus also Corollary 5.1, we need two other stability estimates +for the finite element problem (3.17). We start by letting f ≡ 0 in (4.5). We have: Find +uh ∈ Vh such that +N +� +n=1 +� +In +( ˙uh, v)Ω0 dt + +N +� +n=1 +� +In +An(uh, v) dt + +N +� +n=1 +([uh]n−1, v+ +n−1)Ω0 = 0, +(5.5) +for all v ∈ Vh. +5.1 +The basic stability estimate +The first of the two auxiliary stability estimates is presented as the following lemma: +Lemma 5.1 (The basic stability estimate). Let uh be the solution of (3.17) with f ≡ 0 +and let u0 be the initial value of the analytic solution of the problem presented in Section +2. Then there exists a constant C > 0 such that +∥u− +h,N∥2 +Ω0 + +N +� +n=1 +� +In +|||uh|||2 +An dt + +N +� +n=1 +∥[uh]n−1∥2 +Ω0 ≤ C∥u0∥2 +Ω0. +(5.6) +24 + +Proof. By taking v = 2uh ∈ Vh in (5.5), we have +N +� +n=1 +� +In +2( ˙uh, uh)Ω0 dt +� +�� +� +=I ++ 2 +N +� +n=1 +� +In +An(uh, uh) dt +� +�� +� +=II ++ +N +� +n=1 +2([uh]n−1, u+ +h,n−1)Ω0 +� +�� +� +=III += 0. +(5.7) +We consider the terms in (5.7) separately, starting with the first: +I = +N +� +n=1 +� +In +2( ˙uh, uh)Ω0 dt = +N +� +n=1 +� +In +� +Ω0 +2 ˙uhuh dx dt = +N +� +n=1 +� +In +� +Ω0 +∂t(u2 +h) dx dt += +N +� +n=1 +� +In +∂t +� � +Ω0 +u2 +h dx +� +dt = +N +� +n=1 +� +In +∂t∥uh∥2 +Ω0 dt += +N +� +n=1 +� +∥u− +h,n∥2 +Ω0 − ∥u+ +h,n−1∥2 +Ω0 +� +. +(5.8) +For the treatment of the second term in (5.7), we note that we may apply Lemma 4.1, +since uh(·, t) ∈ Vh(t) for any t ∈ (0, T]. We thus have +II = 2 +N +� +n=1 +� +In +An(uh, uh) dt +(4.11) +≥ 2 +N +� +n=1 +� +In +αt |||uh|||2 +An dt +≥ 2 min +t∈(0,T]{αt} +N +� +n=1 +� +In +|||uh|||2 +An dt. +(5.9) +We move on to the third term. We note that for n = 1, . . . , N, we have from the algebraic +identity (a − b)2 = a2 − 2ab + b2 that +2([uh]n−1, u+ +h,n−1)Ω0 = ∥[uh]n−1∥2 +Ω0 + ∥u+ +h,n−1∥2 +Ω0 − ∥[uh]n−1 − u+ +h,n−1∥2 +Ω0 += ∥[uh]n−1∥2 +Ω0 + ∥u+ +h,n−1∥2 +Ω0 − ∥u− +h,n−1∥2 +Ω0. 
+(5.10) +With (5.10), the third term in (5.7) is +III = +N +� +n=1 +2([uh]n−1, u+ +h,n−1)Ω0 += +N +� +n=1 +∥[uh]n−1∥2 +Ω0 + +N +� +n=1 +� +∥u+ +h,n−1∥2 +Ω0 − ∥u− +h,n−1∥2 +Ω0 +� +. +(5.11) +Now we add the second term on the right-hand side of (5.11) to the right-hand side of (5.8) +25 + +to obtain +N +� +n=1 +� +∥u− +h,n∥2 +Ω0 − ∥u+ +h,n−1∥2 +Ω0 +� ++ +N +� +n=1 +� +∥u+ +h,n−1∥2 +Ω0 − ∥u− +h,n−1∥2 +Ω0 +� += +N +� +n=1 +� +∥u− +h,n∥2 +Ω0 −∥u+ +h,n−1∥2 +Ω0 + ∥u+ +h,n−1∥2 +Ω0 +� +�� +� += 0 +−∥u− +h,n−1∥2 +Ω0 +� += +N−1 +� +n=1 +� +∥u− +h,n∥2 +Ω0 − ∥u− +h,n∥2 +Ω0 +� +�� +� += 0 +� ++ +� +∥u− +h,N∥2 +Ω0 − ∥u− +h,0∥2 +Ω0 +� += ∥u− +h,N∥2 +Ω0 − ∥u− +h,0∥2 +Ω0, +(5.12) +Inserting (5.8), (5.9) and (5.11) into (5.7), and using (5.12), we have +2 min +t∈(0,T]{αt} +N +� +n=1 +� +In +|||uh|||2 +An dt + +N +� +n=1 +∥[uh]n−1∥2 +Ω0 + ∥u− +h,N∥2 +Ω0 − ∥u− +h,0∥2 +Ω0 ≤ 0, +(5.13) +which implies +min +� +2 min +t∈(0,T]{αt}, 1 +�� +N +� +n=1 +� +In +|||uh|||2 +An dt + +N +� +n=1 +∥[uh]n−1∥2 +Ω0 + ∥u− +h,N∥2 +Ω0 +� +≤ ∥u− +h,0∥2 +Ω0 = ∥P0u0∥2 +Ω0 ≤ ∥u0∥2 +Ω0, +(5.14) +where we have used that u− +h,0 = P0u0 in the last identity, where P0 is defined by (4.15). +The last inequality follows from the boundedness of P0. Dividing both sides of (5.14) by +the min-factor on the left-hand side gives (5.6). +5.2 +The strong stability estimate +Lemma 5.2 (The strong stability estimate). Let uh be the solution of (3.17) with f ≡ 0 +and let u0 be the initial value of the analytic solution of the problem presented in Section +2. Then there exists a constant C > 0 such that +N +� +n=1 +tn +� +In +∥ ˙uh∥2 +Ω0 + ∥∆nuh∥2 +Ω0 dt + +N +� +n=2 +tn +kn +∥[uh]n−1∥2 +Ω0 ≤ C∥u0∥2 +Ω0. +(5.15) +Proof. By taking v = −∆nuh ∈ Vh in (5.5), we have +� +In +( ˙uh, −∆nuh)Ω0 dt +� +�� +� +=I ++ +� +In +An(uh, −∆nuh) dt +� +�� +� +=II ++ ([uh]n−1, (−∆nuh)+ +n−1)Ω0 +� +�� +� +=III += 0. +(5.16) +26 + +We consider the terms separately, starting with the first: +I = +� +In +( ˙uh, −∆nuh)Ω0 dt +(4.25) += +� +In +An( ˙uh, uh) dt = +� +In +1 +2∂tAn(uh, uh) dt += 1 +2An(u− +h,n, u− +h,n) − 1 +2An(u+ +h,n−1, u+ +h,n−1). +(5.17) +The second term in (5.16) is +II = +� +In +An(uh, −∆nuh) dt +(4.25) += +� +In +(−∆nuh, −∆nuh)Ω0 dt = +� +In +∥∆nuh∥2 +Ω0 dt. +(5.18) +The third term in (5.16) is +III = ([uh]n−1, (−∆nuh)+ +n−1)Ω0 = (u+ +h,n−1 − u− +h,n−1, −∆nu+ +h,n−1)Ω0 += (u+ +h,n−1, −∆nu+ +h,n−1)Ω0 − (u− +h,n−1, −∆nu+ +h,n−1)Ω0 +(4.25) += +An(u+ +h,n−1, u+ +h,n−1) − (u− +h,n−1, −∆nu+ +h,n−1)Ω0. +(5.19) +Using the identities (5.17), (5.18), and (5.19) in (5.16), we may obtain +1 +2An(u− +h,n, u− +h,n) + +� +In +∥∆nuh∥2 +Ω0 dt + 1 +2An(u+ +h,n−1, u+ +h,n−1) = (u− +h,n−1, −∆nu+ +h,n−1)Ω0. (5.20) +For n = 1, the right-hand side of (5.20) is +(u− +h,n−1, −∆nu+ +h,n−1)Ω0 = (u− +h,0, −∆1u+ +h,0)Ω0 = (P0u0, −∆1u+ +h,0)Ω0 +≤ ∥P0u0∥Ω0∥∆1u+ +h,0∥Ω0 ≤ +1 +2εk1 +∥P0u0∥2 +Ω0 + εk1 +2 ∥∆1u+ +h,0∥2 +Ω0 +≤ +1 +2εt1 +∥u0∥2 +Ω0 + εC +� +I1 +∥∆1uh∥2 +Ω0 dt. +(5.21) +For n = 2, . . . , N, we would like to use (4.25), i.e., the definition of the discrete Laplacian, +on the right-hand side of (5.20), but due to the dG(0) movement of TG, u− +h,n−1 /∈ Vh,n, so we +cannot use (4.25) directly. To handle this, we make use of the shift operator Sn : Vh,n−1 → +Vh,n, defined by (4.45), as follows: +(u− +h,n−1, −∆nu+ +h,n−1)Ω0 = (u− +h,n−1 ± Snu− +h,n−1, −∆nu+ +h,n−1)Ω0 += ((1 − Sn)u− +h,n−1, −∆nu+ +h,n−1)Ω0 +� +�� +� +=RHS.I ++ (Snu− +h,n−1, −∆nu+ +h,n−1)Ω0 +� +�� +� +=RHS.II +. 
+(5.22) +27 + +We treat the terms separately, starting with the first: +RHS.I = ((1 − Sn)u− +h,n−1, −∆nu+ +h,n−1)Ω0 ≤ ∥(1 − Sn)u− +h,n−1∥Ω0∥∆nu+ +h,n−1∥Ω0 +(4.49) +≤ Ch +������u− +h,n−1 +������ +An−1 ∥∆nu+ +h,n−1∥Ω0 +≤ C +ε1 +h +������u− +h,n−1 +������2 +An−1 + ε1Ch∥∆nu+ +h,n−1∥2 +Ω0 +(3.1) +≤ C +ε1 +kn−1 +������u− +h,n−1 +������2 +An−1 + ε1Ckn∥∆nu+ +h,n−1∥2 +Ω0 +≤ C +ε1 +� +In−1 +|||uh|||2 +An−1 dt + ε1C +� +In +∥∆nuh∥2 +Ω0 dt, +(5.23) +where we have used Lemma 4.4, i.e., an estimate for the shift error, to obtain the second +inequality, and the spatiotemporal quasiuniformity to obtain the fourth. We consider the +second term in (5.22). For the sake of brevity we write S = Sn, U = uh,n−1 and thus have +RHS.II = (Snu− +h,n−1 +� +�� +� +∈Vh,n +, −∆nu+ +h,n−1)Ω0 +(4.25) += +An(Snu− +h,n−1, u+ +h,n−1) = An(S U −, U +). +(5.24) +From the algebraic identity (a − b)2 = a2 − 2ab + b2 we have that +2An(S U −, U +) = An(S U −, S U −) + An(U +, U +) +− An(S U − − U +, S U − − U +) +� +�� +� +≥0 from (4.11) +≤ An(S U −, S U −) + An(U +, U +), +(5.25) +where we have used Lemma 4.1, i.e., the discrete coercivity of Ah,t, to obtain the inequality. +Dividing both sides of (5.25) by 2, we get that +RHS.II = An(Snu− +h,n−1, u+ +h,n−1) +≤ 1 +2An(Snu− +h,n−1, Snu− +h,n−1) + 1 +2An(u+ +h,n−1, u+ +h,n−1) +(4.54) +≤ +1 +2An−1(u− +h,n−1, u− +h,n−1) + Ch +������u− +h,n−1 +������ +An−1 ∥∆n−1u− +h,n−1∥Ω0 ++ 1 +2An(u+ +h,n−1, u+ +h,n−1) +(5.23) +≤ +1 +2An−1(u− +h,n−1, u− +h,n−1) + 1 +2An(u+ +h,n−1, u+ +h,n−1) ++ C +ε2 +� +In−1 +|||uh|||2 +An−1 dt + ε2C +� +In−1 +∥∆n−1uh∥2 +Ω0 dt, +(5.26) +where we have used Corollary 4.2, i.e., the stability result of Sn, in the third step, and +then treated the resulting middle term by following the steps in (5.23). We now collect the +28 + +estimates for the right-hand side of (5.20). For n = 1, we use (5.21) in (5.20) and get +1 +2A1(u− +h,1, u− +h,1) + +� +I1 +∥∆1uh∥2 +Ω0 dt + 1 +2A1(u+ +h,0, u+ +h,0) +≤ 1 +2εt1 +∥u0∥2 +Ω0 + εC +� +I1 +∥∆1uh∥2 +Ω0 dt. +(5.27) +Using that A1(u+ +h,0, u+ +h,0) ≥ 0 from (4.11), moving over the last term on the right-hand side +to the left-hand side, and multiplying both sides by t1, (5.27) becomes +t1 +2 A1(u− +h,1, u− +h,1) + (1 − εC)t1 +� +I1 +∥∆1uh∥2 +Ω0 dt ≤ +t1 +2εt1 +∥u0∥2 +Ω0 = 1 +2ε∥u0∥2 +Ω0, +=⇒ +t1 +2 A1(u− +h,1, u− +h,1) + Ct1 +� +I1 +∥∆1uh∥2 +Ω0 dt ≤ C∥u0∥2 +Ω0, +(5.28) +where the last step follows from taking ε sufficiently small. For n = 2, . . . , N, we insert +(5.22) into (5.20), use (5.23) and (5.26), and thus get +1 +2An(u− +h,n, u− +h,n) + +� +In +∥∆nuh∥2 +Ω0 dt + 1 +2An(u+ +h,n−1, u+ +h,n−1) += (u− +h,n−1, −∆nu+ +h,n−1)Ω0 +(5.22) += +RHS.I + RHS.II +(5.23),(5.26) +≤ +C +ε1 +� +In−1 +|||uh|||2 +An−1 dt + ε1C +� +In +∥∆nuh∥2 +Ω0 dt ++ 1 +2An−1(u− +h,n−1, u− +h,n−1) + 1 +2An(u+ +h,n−1, u+ +h,n−1) ++ C +ε2 +� +In−1 +|||uh|||2 +An−1 dt + ε2C +� +In−1 +∥∆n−1uh∥2 +Ω0 dt. 
+(5.29) +Cancelling 1 +2An(u+ +h,n−1, u+ +h,n−1) on both sides, moving over the second term on the right- +hand side to the left-hand side, and multiplying both sides by tn, (5.29) becomes +tn +2 An(u− +h,n, u− +h,n) + (1 − ε1C)tn +� +In +∥∆nuh∥2 +Ω0 dt +≤ tn +�C +ε1 ++ C +ε2 +� � +In−1 +|||uh|||2 +An−1 dt + tnε2C +� +In−1 +∥∆n−1uh∥2 +Ω0 dt ++ tn +2 An−1(u− +h,n−1, u− +h,n−1), +=⇒ +tn +2 An(u− +h,n, u− +h,n) + Ctn +� +In +∥∆nuh∥2 +Ω0 dt − tn−1 +2 An−1(u− +h,n−1, u− +h,n−1) +≤ C +� +In−1 +|||uh|||2 +An−1 dt + tnε2C +� +In−1 +∥∆n−1uh∥2 +Ω0 dt ++ tn +2 An−1(u− +h,n−1, u− +h,n−1) − tn−1 +2 An−1(u− +h,n−1, u− +h,n−1), +(5.30) +29 + +where the step after the arrow follows from taking ε1 sufficiently small and subtracting +tn−1 +2 An−1(u− +h,n−1, u− +h,n−1) on both sides. The difference in the last row of the right-hand side +is +tn +2 An−1(u− +h,n−1, u− +h,n−1) − tn−1 +2 An−1(u− +h,n−1, u− +h,n−1) += kn +2 An−1(u− +h,n−1, u− +h,n−1) ≤ Ckn +������u− +h,n−1 +������2 +An−1 ≤ Ckn−1 +������u− +h,n−1 +������2 +An−1 +≤ C +� +In−1 +|||uh|||2 +An−1 dt, +(5.31) +which we combine with the first term on the right-hand side of (5.30). Summing (5.30) +over n = 2, . . . , N and adding (5.28), we obtain +N +� +n=1 +tn +2 An(u− +h,n, u− +h,n) + C +N +� +n=1 +tn +� +In +∥∆nuh∥2 +Ω0 dt − +N +� +n=2 +tn−1 +2 An−1(u− +h,n−1, u− +h,n−1) +≤ C∥u0∥2 +Ω0 + C +N +� +n=2 +� +In−1 +|||uh|||2 +An−1 dt + +N +� +n=2 +tnε2C +� +In−1 +∥∆n−1uh∥2 +Ω0 dt. +(5.32) +The tnAn-terms in (5.32) form a telescoping sum where only the tNAN-term remains. Also +using that tn ≤ Ctn−1 for n = 2, . . . , N, which follows from the quasi-uniformity of the +temporal discretization, we thus have +tN +2 AN(u− +h,N, u− +h,N) + C +N +� +n=1 +tn +� +In +∥∆nuh∥2 +Ω0 dt +≤ C∥u0∥2 +Ω0 + C +N +� +n=2 +� +In−1 +|||uh|||2 +An−1 dt + ε2C +N +� +n=2 +tn−1 +� +In−1 +∥∆n−1uh∥2 +Ω0 dt +(5.6) +≤ C∥u0∥2 +Ω0 + C∥u0∥2 +Ω0 + ε2C +N−1 +� +n=1 +tn +� +In +∥∆nuh∥2 +Ω0 dt. +(5.33) +Moving the last term on the right-hand side to the left-hand side and taking ε2 sufficiently +small, gives us the following stability estimate for the discrete Laplacian of the finite +element solution: +N +� +n=1 +tn +� +In +∥∆nuh∥2 +Ω0 dt ≤ C∥u0∥2 +Ω0. +(5.34) +It is thus sufficient to estimate the time-derivative terms and the time jump terms on the +left-hand side of (5.15) by the left-hand side of (5.34) to obtain (5.15). +We proceed by showing an estimate for the time-derivative terms. +By taking v = +30 + +(t − tn−1) ˙uh in (5.5), we have +� +In +( ˙uh, (t − tn−1) ˙uh)Ω0 dt +� +�� +� +=I ++ +� +In +An(uh, (t − tn−1) ˙uh) dt +� +�� +� +=II ++ ++ ([uh]n−1, ((t − tn−1) ˙uh)+ +n−1)Ω0 +� +�� +� +=III += 0. +(5.35) +We consider the terms separately, starting with the first: +I = +� +In +( ˙uh, (t − tn−1) ˙uh)Ω0 dt = +� +In +(t − tn−1)( ˙uh, ˙uh)Ω0 dt += +� +In +(t − tn−1)∥ ˙uh∥2 +Ω0 dt. +(5.36) +The second term in (5.35) is +II = +� +In +An(uh, (t − tn−1) ˙uh) dt = +� +In +(t − tn−1)An(uh, ˙uh) dt +(4.25) += +� +In +(t − tn−1)(−∆nuh, ˙uh)Ω0 dt +(5.37) +The third term in (5.35) is +III = ([uh]n−1, ((t − tn−1) ˙uh)+ +n−1)Ω0 = ([uh]n−1, (tn−1 − tn−1) +� +�� +� +=0 +˙u+ +h,n−1)Ω0 = 0. +(5.38) +Using the identities (5.36), (5.37), and (5.38) in (5.35), we may obtain +� +In +(t − tn−1)∥ ˙uh∥2 +Ω0 dt = +� +In +(t − tn−1)(∆nuh, ˙uh)Ω0 dt +≤ +� +In +(t − tn−1)∥∆nuh∥Ω0∥ ˙uh∥Ω0 dt +≤ +� � +In +(t − tn−1)∥∆nuh∥2 +Ω0 dt +�1/2� � +In +(t − tn−1)∥ ˙uh∥2 +Ω0 dt +�1/2 +, +=⇒ +� +In +(t − tn−1)∥ ˙uh∥2 +Ω0 dt ≤ +� +In +(t − tn−1)∥∆nuh∥2 +Ω0 dt. 
+(5.39) +By using an inverse estimate and (5.39), we have +� +In +∥ ˙uh∥2 +Ω0 dt ≤ Ck−1 +n +� +In +(t − tn−1)∥ ˙uh∥2 +Ω0 dt ≤ Ck−1 +n +� +In +(t − tn−1)∥∆nuh∥2 +Ω0 dt +≤ Ck−1 +n kn +� +In +∥∆nuh∥2 +Ω0 dt = C +� +In +∥∆nuh∥2 +Ω0 dt. +(5.40) +31 + +We proceed by showing an estimate for the time jump terms for n = 2, . . . , N. We +would like to take v = [uh]n−1 = u+ +h,n−1 − u− +h,n−1 in (5.5), but due to the dG(0) movement +of TG, u− +h,n−1 /∈ Vh,n as already pointed out, so we cannot make this choice of v. To handle +this, we use the L2(Ω0)-projection Pn. By taking v = Pn[uh]n−1 in (5.5), we have +� +In +( ˙uh, Pn[uh]n−1)Ω0 dt +� +�� +� +=I ++ +� +In +An(uh, Pn[uh]n−1) dt +� +�� +� +=II ++ ++ ([uh]n−1, (Pn[uh]n−1)+ +n−1)Ω0 +� +�� +� +=III += 0. +(5.41) +We consider the terms separately, starting with the first: +I = +� +In +( ˙uh, Pn[uh]n−1)Ω0 dt +(4.15) += +� +In +( ˙uh, [uh]n−1)Ω0 dt. +(5.42) +The second term in (5.35) is +II = +� +In +An(uh, Pn[uh]n−1) dt +(4.25) += +� +In +(−∆nuh, Pn[uh]n−1)Ω0 dt +(4.15) += +� +In +(−∆nuh, [uh]n−1)Ω0 dt. +(5.43) +The third term in (5.41) is +III = ([uh]n−1, (Pn[uh]n−1)+ +n−1)Ω0 = ([uh]n−1, Pn[uh]n−1)Ω0 += ([uh]n−1, Pn[uh]n−1 ± [uh]n−1)Ω0 += ([uh]n−1, Pn[uh]n−1 − [uh]n−1)Ω0 + ([uh]n−1, [uh]n−1)Ω0 += ∥[uh]n−1∥2 +Ω0 − ([uh]n−1, (1 − Pn)[uh]n−1)Ω0. +(5.44) +Using the identities (5.42), (5.43), and (5.44) in (5.41), we may obtain +∥[uh]n−1∥2 +Ω0 = ([uh]n−1, (1 − Pn)[uh]n−1)Ω0 +� +�� +� +=RHS.I ++ +� +In +(∆nuh − ˙uh, [uh]n−1)Ω0 dt +� +�� +� +=RHS.II +. +(5.45) +32 + +We treat the terms separately, starting with the first. For n = 2, . . . , N, the first term is +RHS.I = ([uh]n−1, (1 − Pn)[uh]n−1)Ω0 = ([uh]n−1, [uh]n−1 − Pn[uh]n−1)Ω0 += ([uh]n−1, u+ +h,n−1 − u− +h,n−1 − Pnu+ +h,n−1 +� +�� +� +=u+ +h,n−1 ++Pnu− +h,n−1)Ω0 += ([uh]n−1, −(u− +h,n−1 − Pnu− +h,n−1))Ω0 ≤ ∥[uh]n−1∥Ω0∥u− +h,n−1 − Pnu− +h,n−1∥Ω0 +≤ ∥[uh]n−1∥Ω0∥u− +h,n−1 − Snu− +h,n−1∥Ω0 +(4.49) +≤ ∥[uh]n−1∥Ω0Ch +������u− +h,n−1 +������ +An−1 +≤ 1 +4∥[uh]n−1∥2 +Ω0 + (Ch)2 ������u− +h,n−1 +������2 +An−1 +(3.1) +≤ 1 +4∥[uh]n−1∥2 +Ω0 + Cknkn−1 +������u− +h,n−1 +������2 +An−1 +≤ 1 +4∥[uh]n−1∥2 +Ω0 + Ckn +� +In−1 +|||uh|||2 +An−1 dt. +(5.46) +The second term in (5.45) is +RHS.II = +� +In +(∆nuh − ˙uh, [uh]n−1)Ω0 dt ≤ +� +In +∥∆nuh − ˙uh∥Ω0∥[uh]n−1∥Ω0 dt += ∥[uh]n−1∥Ω0 +� +In +∥∆nuh − ˙uh∥Ω0 dt +≤ ∥[uh]n−1∥Ω0 +� � +In +12 dt +�1/2� � +In +∥∆nuh − ˙uh∥2 +Ω0 dt +�1/2 += ∥[uh]n−1∥Ω0 +� +kn +� +In +∥∆nuh − ˙uh∥2 +Ω0 dt +�1/2 += 1 +4∥[uh]n−1∥2 +Ω0 + kn +� +In +∥∆nuh − ˙uh∥2 +Ω0 dt +≤ 1 +4∥[uh]n−1∥2 +Ω0 + 2kn +� +In +∥∆nuh∥2 +Ω0 + ∥ ˙uh∥2 +Ω0 dt. +(5.47) +Using the estimates (5.46) and (5.47) in (5.45), we may obtain, for n = 2, . . . , N, that +1 +2∥[uh]n−1∥2 +Ω0 ≤ Ckn +� +In−1 +|||uh|||2 +An−1 dt + 2kn +� +In +∥∆nuh∥2 +Ω0 + ∥ ˙uh∥2 +Ω0 dt, +=⇒ +1 +kn +∥[uh]n−1∥2 +Ω0 ≤ C +� +In +∥ ˙uh∥2 +Ω0 + ∥∆nuh∥2 +Ω0 dt + C +� +In−1 +|||uh|||2 +An−1 dt. +(5.48) +Finally we have all the partial results that are needed to show the desired stability +33 + +estimate (5.15). We start with the left-hand side of (5.15). 
+N +� +n=1 +tn +� +In +∥ ˙uh∥2 +Ω0 + ∥∆nuh∥2 +Ω0 dt + +N +� +n=2 +tn +kn +∥[uh]n−1∥2 +Ω0 +(5.48) +≤ +N +� +n=1 +tn +� +In +∥ ˙uh∥2 +Ω0 + ∥∆nuh∥2 +Ω0 dt ++ +N +� +n=2 +tn +� +C +� +In +∥ ˙uh∥2 +Ω0 + ∥∆nuh∥2 +Ω0 dt + C +� +In−1 +|||uh|||2 +An−1 dt +� +≤ C +N +� +n=1 +tn +� +In +∥ ˙uh∥2 +Ω0 + ∥∆nuh∥2 +Ω0 dt + C +N +� +n=2 +� +In−1 +|||uh|||2 +An−1 dt +(5.6) +≤ C +N +� +n=1 +tn +� � +In +∥ ˙uh∥2 +Ω0 dt + +� +In +∥∆nuh∥2 +Ω0 dt +� ++ C∥u0∥2 +Ω0 +(5.40) +≤ C +N +� +n=1 +tn +� +C +� +In +∥∆nuh∥2 +Ω0 dt + +� +In +∥∆nuh∥2 +Ω0 dt +� ++ C∥u0∥2 +Ω0 +≤ C +N +� +n=1 +tn +� +In +∥∆nuh∥2 +Ω0 dt + C∥u0∥2 +Ω0 +(5.34) +≤ C∥u0∥2 +Ω0 + C∥u0∥2 +Ω0 = C∥u0∥2 +Ω0, +(5.49) +which shows (5.15). This concludes the proof of Lemma 5.2. +5.3 +Proof of Theorem 5.1 (The main stability estimate) +For the proof of Theorem 5.1, we will use some additional inequalities. For n = 1, . . . , N +and an, bn ≥ 0, we have +N +� +n=1 +b2 +n ≥ +� +N +� +n=1 +anbn +�2� +N +� +n=1 +a2 +n +�−1 +, +(5.50) +which comes from Cauchy-Schwarz inequality. Noting that k1 = t1, since t0 = 0, we have +N +� +n=1 +kn +tn += 1 + +N +� +n=2 +kn +tn +≤ 1 + +� tN +t1 +1 +t dt = 1 + log(tN/k1). +(5.51) +We are now ready to prove Theorem 5.1. +Proof. The proof idea is to derive lower bounds for separate terms on the left-hand sides +of the auxiliary stability estimates. These lower bounds will then be used to obtain the +34 + +main stability estimate. From the basic stability estimate (5.6), we have +C∥u0∥2 +Ω0 ≥ ∥u− +h,N∥2 +Ω0 + ∥[uh]0∥2 +Ω0 ≥ 1 +2(∥u− +h,N∥Ω0 + ∥[uh]0∥Ω0)2, +=⇒ +∥u− +h,N∥Ω0 + ∥[uh]0∥Ω0 ≤ C∥u0∥Ω0. +(5.52) +In the strong stability estimate (5.15), we may categorize the terms on the left-hand side +as either integral terms or jump terms. Since the treatment will be the same for both the +integral terms, we consider a generic integral term. We thus have from (5.15) that +C∥u0∥2 +Ω0 ≥ +N +� +n=1 +tn +� +In +∥w∥2 +Ω0 dt = +N +� +n=1 +tn +kn +� � +In +12 dt +�� � +In +∥w∥2 +Ω0 dt +� +≥ +N +� +n=1 +tn +kn +� � +In +∥w∥Ω0 dt +�2 +≥ +� +N +� +n=1 +kn +tn +�−1� +N +� +n=1 +� +In +∥w∥Ω0 dt +�2 +, +(5.53) +where we have used Cauchy-Schwarz inequality to obtain the second inequality. To obtain +the last inequality, we have used (5.50) with an = +� +kn/tn and bn = +� +tn/kn +� +In ∥w∥Ω0 dt. +From (5.53), we have +N +� +n=1 +� +In +∥w∥Ω0 dt ≤ C +� +N +� +n=1 +kn +tn +�1/2 +∥u0∥Ω0 ≤ C(1 + log(tN/k1))1/2∥u0∥Ω0, +(5.54) +where we have used (5.51) to obtain the last inequality. We move on to the jump terms. +From (5.15), we have +C∥u0∥2 +Ω0 ≥ +N +� +n=2 +tn +kn +∥[uh]n−1∥2 +Ω0 ≥ +� +N +� +n=2 +kn +tn +�−1� +N +� +n=2 +∥[uh]n−1∥Ω0 +�2 +, +(5.55) +where we have used (5.50) with an = +� +kn/tn and bn = +� +tn/kn∥[uh]n−1∥Ω0, to obtain the +last inequality. From (5.55), we have +N +� +n=2 +∥[uh]n−1∥Ω0 ≤ C +� +N +� +n=2 +kn +tn +�1/2 +∥u0∥Ω0 ≤ C(1 + log(tN/k1))1/2∥u0∥Ω0, +(5.56) +where we have used (5.51) to obtain the last inequality. By adding (5.52), (5.54) for both +integral terms in (5.15), and (5.56), and noting that log(tN/k1) ≥ 0, we may obtain the +main stability estimate (5.1). This concludes the proof of Theorem 5.1. +35 + +6 +A priori error analysis +To prove an a priori error estimate, we follow the methodology presented by Eriksson and +Johnson in [12, 13] and make only minor modifications to account for the CutFEM setting. +Theorem 6.1 (An optimal order a priori error estimate in ∥ · ∥Ω0 at the final time). Let u +be the solution of (2.5) and let uh be the finite element solution defined by (3.17). 
Then, +for q = 0, 1, we have that +∥u(tN) − u− +h,N∥Ω0 ≤ CN max +1≤n≤N +� +k2q+1 +n +∥ ˙u(2q+1)∥Ω0,In + hp+1∥Dp+1 +x +u∥Ω0,In +� +, +(6.1) +where ∥ · ∥Ω0 = ∥ · ∥L2(Ω0), CN = C(log(tN/kN) + 1)1/2, where C > 0 is a constant, +kn = tn−tn−1, ∥w∥Ω0,In = maxt∈In ∥w∥Ω0, ˙u(2q+1) = ∂2q+1u/∂t2q+1, h is the largest diameter +of a simplex in T0 ∪ TG, and Dx denotes the derivative with respect to space. +Proof. Let e = u − uh denote the approximation error. We start by splitting the error +using the interpolant ˜u = ˜InRnu ∈ Vh, where ˜In is the temporal interpolation operator +defined by (B.13), and Rn is the Ritz projection operator defined by (4.16), +e = u − uh ±˜u +���� +=0 += (u − ˜u) +� �� � +=ρ ++ (˜u − uh) +� +�� +� +=θ += ρ + θ. +(6.2) +We then consider +∥u(tN) − u− +h,N∥Ω0 = ∥e− +N∥Ω0 = ∥(ρ + θ)− +N∥Ω0 ≤ ∥ρ− +N∥Ω0 +� �� � +The ρ-part ++ ∥θ− +N∥Ω0 +� �� � +The θ-part +, +(6.3) +where we treat the ρ-part and the θ-part separately. +Estimation of the ρ-part +Here, we consider the term in (6.3) involving ρ. First we note that since ρ = u − ˜u = +u − ˜InRnu, we have for n = 1, . . . , N, +∥ρ− +n ∥Ω0 = ∥(u − ˜InRnu)− +n ∥Ω0 = ∥u− +n − (˜InRnu)− +n ∥Ω0 +(B.13a) += +∥u− +n − (Rnu)− +n ∥Ω0 = ∥u− +n − Rnu− +n ∥Ω0 +≤ ∥u − Rnu∥Ω0,In, +(6.4) +where ∥w∥Ω0,In = maxt∈In ∥w∥Ω0 = maxt∈In ∥w(·, t)∥Ω0. The ρ-part in (6.3) is thus +∥ρ− +N∥Ω0 +(6.4) +≤ ∥u − Rnu∥Ω0,IN ≤ max +1≤n≤N +� +∥u − Rnu∥Ω0,In +� +. +(6.5) +Estimation of the θ-part +Here, we consider the term in (6.3) involving θ. We first note that from the Galerkin +orthogonality (4.65), we have +Bh(θ, zh) = −Bh(ρ, zh), +(6.6) +36 + +where we have used e = ρ + θ and chosen v = zh. Since θ = ˜u − uh ∈ Vh is a permissible +test function for the discrete dual problem (4.66), we may take v = θ in (4.66) and choose +z+ +h,N = θ− +N to obtain +Bh(θ, zh) = ∥θ− +N∥2 +Ω0. +(6.7) +Combining (6.6) and (6.7), and using Lemma 4.6, we obtain the error representation +∥θ− +N∥2 +Ω0 = − Bh(ρ, zh) += +N +� +n=1 +� +In +(ρ, ˙zh)Ω0 dt +� +�� +� +=I ++ +N +� +n=1 +− +� +In +Ah,t(ρ, zh) dt +� +�� +� +=II ++ +N−1 +� +n=1 +(ρ− +n , [zh]n)Ω0 +� +�� +� +=III ++ −(ρ− +N, z− +h,N)Ω0 +� +�� +� +=IV +. +(6.8) +We consider the terms on the right-hand side of (6.8) separately, starting with the first sum. +We note that for q = 0, the first terms vanishes, since for every x ∈ Ω0, zh(x, ·)|In ∈ P0(In) +which means that ˙zh(x, ·)|In = 0. For n = 1, . . . , N, we thus have +I +���� +q=0 += +� +In +(ρ, ˙zh)Ω0 dt = 0, +(6.9) +and +I +���� +q≥1 += +� +In +(ρ, ˙zh)Ω0 dt = +� +In +� +Ω0 +(u − ˜InRnu) ˙zh dx dt += +� +Ω0 +� � +In +u ˙zh dt − +� +In +˜InRnu +˙zh +���� +∈V n,q−1 +h +dt +� +dx +(B.13b) += +� +Ω0 +� � +In +u ˙zh dt − +� +In +Rnu ˙zh dt +� +dx += +� +In +� +Ω0 +(u − Rnu) ˙zh dx dt = +� +In +(u − Rnu, ˙zh)Ω0 dt +≤∥u − Rnu∥Ω0,In +� +In +∥ ˙zh∥Ω0 dt. +(6.10) +For n = 1, . . . , N, the terms in the second sum on the right-hand side of (6.8) is +II = − +� +In +Ah,t(ρ, zh) dt = − +� +In +An(u − ˜u, zh) dt = − +� +In +An(u, zh) − An(˜u, zh) dt +(4.16) += +− +� +In +An(Rnu, zh) − An(˜u, zh) dt = +� +In +−An(Rnu − ˜u, zh) dt +(4.25) += +� +In +(Rnu − ˜u, ∆nzh)Ω0 dt. +(6.11) +37 + +The subsequent treatment of II is different for q = 0 and q ≥ 1. For q = 0, we continue by +writing +II +���� +q=0 += +� +In +(Rnu − ˜u, ∆nzh)Ω0 dt ≤ +� +In +∥Rnu − ˜u∥Ω0∥∆nzh∥Ω0 dt +≤ ∥Rnu − ˜u∥Ω0,In +� +In +∥∆nzh∥Ω0 dt. 
+(6.12) +For q ≥ 1, we may instead continue by writing +II +���� +q≥1 += +� +In +(Rnu − ˜u, ∆nzh)Ω0 dt += +� +In +(Rnu − ˜u, ∆n +� +z− +h,n + +� t +tn +˙zh ds +� +)Ω0 dt += +� +In +(Rnu − ˜u, ∆nz− +h,n)Ω0 dt +� +�� +� +=II.1 ++ +� +In +(Rnu − ˜u, ∆n +� � t +tn +˙zh ds +� +)Ω0 dt +� +�� +� +=II.2 +. +(6.13) +We consider II.1 and II.2 separately, starting with the former. +II.1 = +� +In +(Rnu − ˜u, ∆nz− +h,n)Ω0 dt = +� +In +� +Ω0 +(Rnu − ˜InRnu)∆nz− +h,n dx dt += +� +Ω0 +� � +In +Rnu∆nz− +h,n dt − +� +In +˜InRnu ∆nz− +h,n +� �� � +∈V n,q−1 +h +dt +� +dx +(B.13b) += +� +Ω0 +� � +In +Rnu∆nz− +h,n dt − +� +In +Rnu∆nz− +h,n dt +� +�� +� +=0 +� +dx += 0. +(6.14) +For q = 1, we may treat II.2 in the following way: +II.2 +���� +q=1 += +� +In +(Rnu − ˜u, ∆n +� � t +tn +˙zh ds +� +)Ω0 dt +q=1 += +� +In +(Rnu − ˜u, ∆n{(t − tn) ˙zh})Ω0 dt +(4.25) += +� +In +−An(Rnu − ˜u, (t − tn) ˙zh) dt +(4.25) += +� +In +(∆n{Rnu − ˜u}, (t − tn) ˙zh)Ω0 dt +≤ +� +In +|t − tn|∥∆n{Rnu − ˜u}∥Ω0∥ ˙zh∥Ω0 dt +≤ kn∥∆n{Rnu − ˜u}∥Ω0,In +� +In +∥ ˙zh∥Ω0 dt. +(6.15) +38 + +For n = 1, . . . , N − 1, the terms in the third sum on the right-hand side of (6.8) is +III = (ρ− +n , [zh]n)Ω0 ≤ ∥ρ− +n ∥Ω0∥[zh]n∥Ω0 +(6.4) +≤ ∥u − Rnu∥Ω0,In∥[zh]n∥Ω0. +(6.16) +The fourth term on the right-hand side of (6.8) is treated in the exact same way. +IV = −(ρ− +N, z− +h,N)Ω0 ≤ ∥ρ− +N∥Ω0∥z− +h,N∥Ω0 +(6.4) +≤ ∥u − Rnu∥Ω0,IN∥z− +h,N∥Ω0. +(6.17) +Summing up what we have for q = 0, i.e. inserting (6.9), (6.12), (6.16), and (6.17) into +(6.8), we obtain +∥θ− +N∥2 +Ω0 +���� +q=0 +≤ +N +� +n=1 +∥Rnu − ˜u∥Ω0,In +� +In +∥∆nzh∥Ω0 dt ++ +N−1 +� +n=1 +∥u − Rnu∥Ω0,In∥[zh]n∥Ω0 ++ ∥u − Rnu∥Ω0,IN∥z− +h,N∥Ω0 +≤ max +1≤n≤N +� +∥u − Rnu∥Ω0,In + ∥Rnu − ˜u∥Ω0,In +� +× +× +� +N +� +n=1 +� +In +∥∆nzh∥Ω0 dt + +N−1 +� +n=1 +∥[zh]n∥Ω0 + ∥z− +h,N∥Ω0 +� +≤ CNF0(u)∥θ− +N∥Ω0, +(6.18) +where F0(u) is the factor with the max-function. To obtain the second inequality, we have +taken the maximum over 1 ≤ n ≤ N of all the left-hand factors in every term on the +left-hand side. To obtain the last inequality, we have used the stability estimate (5.4) with +z+ +h,N = θ− +N. Analogously, summing up what we have for q = 1, i.e. inserting (6.10), (6.13), +(6.16), and (6.17) into (6.8), where we have inserted (6.14) and (6.15) into (6.13), we obtain +39 + +∥θ− +N∥2 +Ω0 +���� +q=1 +≤ +N +� +n=1 +∥u − Rnu∥Ω0,In +� +In +∥ ˙zh∥Ω0 dt ++ +N +� +n=1 +kn∥∆n{Rnu − ˜u}∥Ω0,In +� +In +∥ ˙zh∥Ω0 dt dt ++ +N−1 +� +n=1 +∥u − Rnu∥Ω0,In∥[zh]n∥Ω0 ++ ∥u − Rnu∥Ω0,IN∥z− +h,N∥Ω0 +≤ max +1≤n≤N +� +∥u − Rnu∥Ω0,In + kn∥∆n{Rnu − ˜u}∥Ω0,In +� +× +× +� +2 +N +� +n=1 +� +In +∥ ˙zh∥Ω0 dt + +N−1 +� +n=1 +∥[zh]n∥Ω0 + ∥z− +h,N∥Ω0 +� +≤ CNF1(u)∥θ− +N∥Ω0, +(6.19) +where F1(u) is the factor with the max-function. To obtain the second inequality, we have +taken the maximum over 1 ≤ n ≤ N of all the left-hand factors in every term on the +left-hand side. To obtain the last inequality, we have used the stability estimate (5.4) with +z+ +h,N = θ− +N. Dividing both sides in (6.18) and (6.19) by ∥θ− +N∥Ω0, the estimation of the θ-part +for q = 0, 1, finally becomes +∥θ− +N∥Ω0 ≤ CNFq(u). +(6.20) +Estimation of Fq(u) +Now we need an estimate for Fq(u). From (6.18) and (6.19), we note that we may write +Fq(u) for q = 0, 1, as +Fq(u) = max +1≤n≤N +� +∥u − Rnu∥Ω0,In +� +�� +� +=I ++(1 − q) ∥Rnu − ˜u∥Ω0,In +� +�� +� +=II ++ qkn ∥∆n{Rnu − ˜u}∥Ω0,In +� +�� +� +=III +� +. 
+(6.21) +We treat the terms separately, starting with the first for which we use Lemma 4.2: +I = ∥u − Rnu∥Ω0,In +(4.18) +≤ CIhp+1∥Dp+1 +x +u∥Ω0,In, +(6.22) +where CI > 0 is a constant. The second term on the right-hand side of (6.21) is +II = ∥Rnu − ˜u∥Ω0,In = ∥Rnu − ˜InRnu + ˜Inu − ˜Inu + u − u∥Ω0,In +≤ ∥u − Rnu∥Ω0,In + ∥˜In(u − Rnu)∥Ω0,In + ∥u − ˜Inu∥Ω0,In +≤ CII +� +hp+1∥Dp+1 +x +u∥Ω0,In + kq+1 +n +∥ ˙u(q+1)∥Ω0,In +� +, +(6.23) +40 + +where CII > 0 is a constant. We have used (6.22) on the first term in the second row of +(6.23). On the second term, we have first used the boundedness of ˜In from Lemma B.3, +and then applied (6.22). On the last term in the second row of (6.23), we have used (B.14) +from Lemma B.3. We move on to the third term in (6.21). Note that this term is only +present for q = 1. To treat it we will use the following: For ψ ∈ H2(Ω0) and v ∈ Vh,n we +have that +(−∆nRnψ, v)Ω0 +(4.25) += +An(Rnψ, v) +(4.16) += +An(ψ, v) +(A.12) += +(−∆ψ, v)Ω0 +=⇒ +∥∆nRnψ∥2 +Ω0 = (−∆ψ, −∆nRnψ) ≤ ∥∆ψ∥Ω0∥∆nRnψ∥Ω0, +(6.24) +where we have used the definitions of ∆n and Rn, and Corollary A.1, i.e., partial integration +in broken Sobolev spaces with bilinear forms A. Dividing both sides by a factor ∥∆nRnψ∥Ω0 +gives +∥∆nRnψ∥Ω0 ≤ ∥∆ψ∥Ω0. +(6.25) +The third term in (6.21) is +III = ∥∆n{Rnu − ˜u}∥Ω0,In = ∥∆n{Rnu − ˜InRnu}∥Ω0,In +(B.14) +≤ +Ck2 +n∥∆n{∂2 +t Rnu}∥Ω0,In += Ck2 +n∥∆nRn ˙u(2)∥Ω0,In +(6.25) +≤ Ck2 +n∥∆ ˙u(2)∥Ω0,In = CIIIk2 +n∥ ˙u(3)∥Ω0,In, +(6.26) +where CIII > 0 is a constant. With the insertion of (6.22), (6.23), and (6.26) in (6.21), we +get for q = 0, 1 +Fq(u) ≤ max +1≤n≤N +� +CIhp+1∥Dp+1 +x +u∥Ω0,In ++ (1 − q)CII +� +hp+1∥Dp+1 +x +u∥Ω0,In + kq+1 +n +∥ ˙u(q+1)∥Ω0,In +� ++ qknCIIIk2 +n∥ ˙u(3)∥Ω0,In +� += max +1≤n≤N +� +(CI + (1 − q)CII)hp+1∥Dp+1 +x +u∥Ω0,In ++ (1 − q)CIIkq+1 +n +∥ ˙u(q+1)∥Ω0,In + qCIIIk3 +n∥ ˙u(3)∥Ω0,In +� +≤ C max +1≤n≤N +� +hp+1∥Dp+1 +x +u∥Ω0,In + k2q+1 +n +∥ ˙u(2q+1)∥Ω0,In +� +, +(6.27) +where C > 0 is a constant. +The final step +To obtain the desired error estimate, we insert the estimations of the ρ-part (6.5) and the +41 + +θ-part (6.20) in (6.3) to obtain +∥u(tN) − u− +h,N∥Ω0 ≤ ∥ρ− +N∥Ω0 + ∥θ− +N∥Ω0 +(6.5),(6.20) +≤ +max +1≤n≤N +� +∥u − Rnu∥Ω0,In +� +� +�� +� +≤Fq(u) ++CNFq(u) +≤ (1 + CN)Fq(u) ≤ CNFq(u) +(6.27) +≤ CN max +1≤n≤N +� +k2q+1 +n +∥ ˙u(2q+1)∥Ω0,In + hp+1∥Dp+1 +x +u∥Ω0,In +� +, +(6.28) +where we have used the estimation of Fq(u), given by (6.27). This concludes the proof of +Theorem 6.1. +42 + +7 +Numerical results +Here we present numerical results for the implementation of (3.17) for the following model +problem in one spatial dimension: +� +� +� +� +� +˙u − uxx = f +in (0, 1) × (0, 3], +u = 0 +on {0, 1} × (0, 3], +u = sin2(πx) +in (0, 1) × {0}, +(7.1a) +where +f(x, t) = −(1 +2 sin2(πx) + 2π2 cos(2πx))e−t/2. +(7.1b) +The exact solution of (7.1) is +u = sin2(πx)e−t/2. +(7.2) +To obtain the finite element solution uh, we have used piecewise linear basis functions in +space, and in time we have used the discontinuous Galerkin methods dG(0) and dG(1). In +other words, the finite element method defined by (3.17) for p = 1 and q = 0, 1. +The right-hand side integrals involving f have been approximated locally by quadrature +over the space-time prisms: first quadrature in time, then quadrature in space. In space, +three-point Gauss-Legendre quadrature has been used, thus resulting in a quadrature error +of the sixth order, i.e., quadrature error ∼ h6. 
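For reference, the sixth-order behaviour of the spatial rule can be checked in isolation. The following Python sketch is our own illustration and is not part of the implementation used for the results in this section; the integrand and the interval are arbitrary smooth placeholders chosen only to expose the order of composite three-point Gauss-Legendre quadrature.

import numpy as np

# Three-point Gauss-Legendre rule on the reference interval [-1, 1].
xi, w = np.polynomial.legendre.leggauss(3)

def composite_gl3(f, a, b, n):
    # Composite 3-point Gauss-Legendre quadrature of f on [a, b] with n subintervals.
    edges = np.linspace(a, b, n + 1)
    mid = 0.5 * (edges[:-1] + edges[1:])             # subinterval midpoints
    half = 0.5 * (edges[1:] - edges[:-1])            # subinterval half-lengths
    x = mid[:, None] + half[:, None] * xi[None, :]   # mapped quadrature points
    return np.sum(half[:, None] * w[None, :] * f(x))

f = lambda x: np.exp(x) * np.sin(2.0 * np.pi * x)    # an arbitrary smooth test integrand
ref = composite_gl3(f, 0.0, 1.0, 4096)               # reference value on a very fine partition

for n in (4, 8, 16, 32):
    err = abs(composite_gl3(f, 0.0, 1.0, n) - ref)
    print(n, err)   # errors drop by roughly 2^6 = 64 per refinement, i.e. error ~ h^6

The rule is exact for polynomials of degree up to five, which is what produces the h^6 behaviour quoted above.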
For dG(0) in time, the midpoint rule has been used, thus resulting in a quadrature error of the second order, i.e., quadrature error ∼ k^2. For dG(1) in time, three-point Lobatto quadrature has been used, thus resulting in a quadrature error of the fourth order, i.e., quadrature error ∼ k^4.

The velocity µ of the overlapping mesh has been 0 on every subinterval I_n = (t_{n−1}, t_n] in accordance with dG(0) mesh movement. The stabilization parameter is γ = 10 in all simulations used to obtain the numerical results presented in this section.

7.1 Illustrative examples

The solution is presented for two different pairs of equidistant space-time discretizations, where G is immersed in Ω0 for all t ∈ [0, 3], and the length of G is 0.25. First, we consider the coarse case: (22 + 7) × 10, i.e., 22 nodes for T0, 7 nodes for TG, and 10 time steps on the interval (0, 3]. Second, we consider the fine case: (44 + 14) × 30, i.e., 44 nodes for T0, 14 nodes for TG, and 30 time steps on the interval (0, 3]. We present the solution for these two cases for three different velocities (µ) of the overlapping mesh TG: µ = 0, µ = 0.1, and µ = (1/2) sin(2πt/3). We consider these six cases in Figure 5–7 below.

Figure 5: The coarse case (left) and the fine case (right) for µ = 0. The background mesh T0 is blue and its nodes are marked with small blue circles. The overlapping mesh TG is red and its nodes are marked with small red crosses. The space-time boundary ¯Γn between the two meshes is black.

Figure 6: The coarse case (left) and the fine case (right) for µ = 0.1. The background mesh T0 is blue and its nodes are marked with small blue circles. The overlapping mesh TG is red and its nodes are marked with small red crosses. The space-time boundary ¯Γn between the two meshes is black.

Figure 7: The coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3). The background mesh T0 is blue and its nodes are marked with small blue circles. The overlapping mesh TG is red and its nodes are marked with small red crosses. The space-time boundary ¯Γn between the two meshes is black.

7.2 Convergence study

The error is the L2(Ω0)-norm of the difference between the exact and the finite element solution at the final time, i.e., ∥e(T)∥_{L2(Ω0)} = ∥u(T) − u^-_{h,N}∥_{Ω0}. The integral in the L2-norm has been approximated by composite three-point Gauss-Legendre quadrature, thus resulting in a quadrature error of the third order, i.e., quadrature error ∼ (h^6)^{1/2} = h^3.

We present results displaying the error's dependence on both the time step k and the mesh size h, separately, for different constant values of µ. Besides the computed error, each error convergence plot contains a line segment that has been computed with the linear least squares method to fit the error data. This line segment is referred to as the LLS of the error. The slope of the LLS of the error is given in the caption beneath each error convergence figure. Reference slopes are also included.
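The slope of such an LLS fit is an ordinary least-squares fit in log-log coordinates. A minimal Python sketch follows; the data values are placeholders for illustration and are not taken from the tables below.

import numpy as np

# Placeholder data: time steps and the corresponding errors ||e(T)||_{L2(Omega_0)}.
k   = np.array([0.1, 0.05, 0.025, 0.0125])
err = np.array([2.1e-2, 1.05e-2, 5.3e-3, 2.6e-3])

# Fit log(err) = slope * log(k) + intercept; the slope is the observed convergence order.
slope, intercept = np.polyfit(np.log(k), np.log(err), 1)
print("LLS slope:", slope)   # close to 1 for dG(0) in time, cf. Table 2 below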
Both T0 and TG are uniform meshes, with mesh sizes h0 and hG, respectively. The temporal discretization is also uniform with time step k for each instance. Furthermore, the final time is set to T = 1, the length of the overlapping mesh TG is 0.25 and the initial position of TG is the spatial interval [0.125, 0.125 + 0.25]. In the plots with the error versus k, the mesh sizes h = h0 = hG have been fixed at a sufficiently small value so that the error's dependence on h has been negligible in comparison with its dependence on k, and vice versa in the plots with the error versus h = h0 ≥ hG. The fixed values for the mesh size and the time step have been obtained by trial and error.

7.3 dG(0) in time

7.3.1 Illustrative examples

Figure 8–13 display the dG(0) finite element solution uh for the six different cases (Figure 5–7) from two different angles.

Figure 8: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (2D view).

Figure 9: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (3D view).

Figure 10: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0.1 (2D view).

Figure 11: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0.1 (3D view).

Figure 12: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (2D view).

Figure 13: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (3D view).

7.3.2 Convergence study

Figure 14 and Figure 15 display two error convergence plots each. The left plots show the error versus k, and the right plots show the error versus h = h0 ≥ hG. The velocity is µ = 0 in Figure 14 and µ = 0.6 in Figure 15. In the plots displaying the error versus k, the mesh sizes have been fixed at h = h0 = hG = 10^{-3}. Analogously, in the plots with the error versus h, the time step has been fixed at k = 10^{-4}.
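To relate the dG(0) results to a classical scheme: on a single fitted mesh, i.e., ignoring the overlapping mesh, the Nitsche coupling, and the stabilization entirely, dG(0) in time with piecewise linear elements coincides with a backward-Euler-type scheme, up to the treatment of the right-hand side. The following Python sketch is our own simplified single-mesh analogue of the model problem (7.1); it treats f by nodal interpolation and is only meant to reproduce the qualitative first-order behaviour in k, not the cut finite element method (3.17).

import numpy as np

def solve_heat_dg0(M_nodes, N_steps, T=3.0):
    # Backward Euler time stepping (single-mesh analogue of dG(0)) with P1 elements
    # for u_t - u_xx = f on (0,1), u(0,t) = u(1,t) = 0, u(x,0) = sin^2(pi x).
    x = np.linspace(0.0, 1.0, M_nodes)
    h = x[1] - x[0]
    k = T / N_steps

    n = M_nodes - 2                                    # interior nodes
    A = (np.diag(np.full(n, 2.0 / h))                  # stiffness matrix
         + np.diag(np.full(n - 1, -1.0 / h), 1)
         + np.diag(np.full(n - 1, -1.0 / h), -1))
    Mass = (np.diag(np.full(n, 2.0 * h / 3.0))         # consistent mass matrix
            + np.diag(np.full(n - 1, h / 6.0), 1)
            + np.diag(np.full(n - 1, h / 6.0), -1))

    f = lambda x, t: -(0.5 * np.sin(np.pi * x) ** 2
                       + 2.0 * np.pi ** 2 * np.cos(2.0 * np.pi * x)) * np.exp(-t / 2.0)

    U = np.sin(np.pi * x[1:-1]) ** 2                   # nodal interpolant of the initial data
    B = Mass + k * A
    for step in range(1, N_steps + 1):
        t = step * k
        F = Mass @ f(x[1:-1], t)                       # load vector with f nodally interpolated
        U = np.linalg.solve(B, Mass @ U + k * F)

    u_exact = np.sin(np.pi * x[1:-1]) ** 2 * np.exp(-T / 2.0)
    return np.sqrt(h) * np.linalg.norm(U - u_exact)    # discrete l2 proxy for the L2 error

# With a fine fixed mesh, halving k roughly halves the error (first order in k).
for N in (20, 40, 80, 160):
    print(N, solve_heat_dg0(M_nodes=201, N_steps=N))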
Figure 14: Error convergence for dG(0) when µ = 0. Left: The error versus k. The slope of the LLS of the error is 1.0064. Right: The error versus h. The slope of the LLS of the error is 2.0559.

Figure 15: Error convergence for dG(0) when µ = 0.6. Left: The error versus k. The slope of the LLS of the error is 1.0064. Right: The error versus h. The slope of the LLS of the error is 2.0501.

The slopes of the LLS of the error for different values of the velocity µ are presented in Table 2.

µ    | slope, error versus k (points used) | slope, error versus h (points used)
0    | 1.0064 (1–15)                       | 2.0559 (1–11)
0.1  | 1.0064 (1–15)                       | 2.0486 (1–11)
0.2  | 1.0064 (1–15)                       | 2.0421 (1–11)
0.4  | 1.0064 (1–15)                       | 2.0422 (1–11)
0.6  | 1.0064 (1–15)                       | 2.0501 (1–11)

Table 2: The slope of the LLS of the error versus k and h for different values of µ for dG(0).

7.4 dG(1) in time

7.4.1 Illustrative examples

Figure 16–21 display the dG(1) finite element solution uh for the six different cases (Figure 5–7) from two different angles.

Figure 16: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (2D view).

Figure 17: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (3D view).

Figure 18: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0.1 (2D view).

Figure 19: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0.1 (3D view).

Figure 20: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (2D view).

Figure 21: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (3D view).

7.4.2 Convergence study

Figure 22 and Figure 23 display two error convergence plots each. The left plots show the error versus k, and the right plots show the error versus h = h0 ≥ hG. The velocity is µ = 0 in Figure 22 and µ = 0.6 in Figure 23. In the plots displaying the error versus k, the mesh sizes have been fixed at h = h0 = hG = 5 · 10^{-5}. Analogously, in the plots with the error versus h, the time step has been fixed at k = 10^{-3}.
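The third-order dependence on k reported below for dG(1) is the classical nodal superconvergence of dG(q) in time, of order 2q + 1, and it can already be seen for the scalar test equation u' + au = 0. The 2×2 local system in the following Python sketch is our own derivation for that scalar test problem (it is not taken from the paper); per step it reproduces the (1,2) Padé approximation of e^{-ak}, whose nodal error is O(k^3) globally.

import numpy as np

def dg1_step(u_prev, a, k):
    # One dG(1) step for u' + a*u = 0 on an interval of length k.
    # Unknowns: U0 = u(t_{n-1}^+), U1 = u(t_n^-), with a linear Lagrange basis in time.
    A = np.array([[ 0.5 + a * k / 3.0, 0.5 + a * k / 6.0],
                  [-0.5 + a * k / 6.0, 0.5 + a * k / 3.0]])
    b = np.array([u_prev, 0.0])
    U0, U1 = np.linalg.solve(A, b)
    return U1                                   # nodal value carried to the next interval

def final_error(a, T, N):
    k = T / N
    u = 1.0
    for _ in range(N):
        u = dg1_step(u, a, k)
    return abs(u - np.exp(-a * T))

# Halving k reduces the end-point error by roughly 2^3 = 8 (order 2q+1 = 3 for q = 1).
for N in (4, 8, 16, 32, 64):
    print(N, final_error(a=1.0, T=1.0, N=N))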
Figure 22: Error convergence for dG(1) when µ = 0. Left: The error versus k. The slope of the LLS of the error is 2.7890. Right: The error versus h. The slope of the LLS of the error is 2.0122.

Figure 23: Error convergence for dG(1) when µ = 0.6. Left: The error versus k. The slope of the LLS of the error is 2.8437. Right: The error versus h. The slope of the LLS of the error is 2.0082.

The slopes of the LLS of the error for different values of the velocity µ are presented in Table 3.

µ    | slope, error versus k (points used) | slope, error versus h (points used)
0    | 2.7890 (9–12)                       | 2.0122 (1–15)
0.1  | 2.9142 (9–12)                       | 2.0058 (1–15)
0.2  | 2.8493 (9–12)                       | 2.0024 (1–15)
0.4  | 2.6994 (9–12)                       | 2.0024 (1–15)
0.6  | 2.8437 (9–12)                       | 2.0082 (1–15)

Table 3: The slope of the LLS of the error versus k and h for different values of µ for dG(1).

7.5 Comparison with analytic results

From the a priori error estimate in Theorem 6.1 we have that the error has the following dependence on the time step k and mesh size h:

∥e(T)∥_{L2(Ω0)} ∼ k^{2q+1} + h^{p+1}.   (7.3)

Thus with p = 1, Theorem 6.1 says that

∥e(T)∥_{L2(Ω0)} ∼ k^1 + h^2, with dG(0) in time,   (7.4)
∥e(T)∥_{L2(Ω0)} ∼ k^3 + h^2, with dG(1) in time.   (7.5)

The slopes of the LLS of the numerical error presented in Table 2 and 3 thus verify the analytic error convergence orders from Theorem 6.1.

8 Conclusions

We have presented a cut finite element method for a parabolic model problem on an overlapping mesh situation: one stationary background mesh and one discontinuously moving, slabwise stationary overlapping mesh. We have applied the analysis framework presented in [12, 13] to the method with natural modifications to account for the CutFEM setting. The greatest difference and novelty in the presented analysis is the shift operator. The main results of the analysis are basic and strong stability estimates and an optimal order a priori error estimate. We have also presented numerical results for a parabolic problem in one spatial dimension that verify the analytic error convergence orders.

A Analytic tools

Lemma A.1 (A jump identity). Let ω+, ω− ∈ R and ω+ + ω− = 1, let [A] := A+ − A−, and ⟨A⟩ := ω+A+ + ω−A−. We then have

[AB] = [A]⟨B⟩ + ⟨A⟩[B] + (ω− − ω+)[A][B].   (A.1)

Proof. The three terms on the right-hand side of (A.1) are

[A]⟨B⟩ = (A+ − A−)(ω+B+ + ω−B−) = ω+A+B+ + ω−A+B− − ω+A−B+ − ω−A−B−,   (A.2)

⟨A⟩[B] = (ω+A+ + ω−A−)(B+ − B−) = ω+A+B+ − ω+A+B− + ω−A−B+ − ω−A−B−,   (A.3)

(ω− − ω+)[A][B] = (ω− − ω+)(A+ − A−)(B+ − B−) = (ω− − ω+)(A+B+ − A+B− − A−B+ + A−B−).   (A.4)

Adding these three expressions gives

[A]⟨B⟩ + ⟨A⟩[B] + (ω− − ω+)[A][B]
  = ω+A+B+ + ω−A+B− − ω+A−B+ − ω−A−B−
  + ω+A+B+ − ω+A+B− + ω−A−B+ − ω−A−B−
  + ω−A+B+ − ω−A+B− − ω−A−B+ + ω−A−B−
  − ω+A+B+ + ω+A+B− + ω+A−B+ − ω+A−B−,   (A.5)

which after cancellation of most of the terms yields

(ω+ + ω−)A+B+ − (ω+ + ω−)A−B− = A+B+ − A−B− = [AB].   (A.6)

Lemma A.2 (Partial integration in broken Sobolev spaces). For d = 1, 2, or 3, let Ω ⊂ R^d be a bounded domain and let Γ ⊂ Ω be a continuous manifold of codimension 1 that partitions Ω into the subdomains Ω1, · · · , ΩN. For ψ ∈ H^2(Ω) and v ∈ H^1_0(Ω1, · · · , ΩN), we have that

(−∆ψ, v)_Ω = ∑_{i=1}^{N} (∇ψ, ∇v)_{Ωi} − (⟨∂_nψ⟩, [v])_Γ.
+(A.7) +Proof. Using the partition of Ω and Green’s first identity, the left-hand side of (A.7) is +(−∆ψ, v)Ω = +N +� +i=1 +(−∆ψ, v)Ωi = +N +� +i=1 +� +(∇ψ, ∇v)Ωi − ((∂nψ)i, vi)∂Ωi +� +. +(A.8) +56 + +Letting γ ⊂ Γ denote the common boundary between two subdomains, and noting that +the γ’s form a partition of Γ, the sum of the boundary terms is +N +� +i=1 +((∂nψ)i, vi)∂Ωi = +N +� +i=1 +� +((∂nψ)i, vi)∂Ωi∩∂Ω +� +�� +� +=0, v|∂Ω=0 ++((∂nψ)i, vi)∂Ωi∩Γ +� += +� +γ +� +((∂nψ)+, v+)γ + ((∂nψ)−, v−)γ +� += +� +Γ +n+ · (∇ψv)+ + n− · (∇ψv)− ds += +� +Γ +n · (∇ψv)+ − n · (∇ψv)− ds = +� +Γ +[∂nψv] ds +(A.1) += +� +Γ +[∂nψ] +� �� � +=0 +⟨v⟩ + ⟨∂nψ⟩[v] + (ω− − ω+) [∂nψ] +� �� � +=0 +[v] ds += (⟨∂nψ⟩, [v])Γ. +(A.9) +In the penultimate equality, we have used Lemma A.1 and that [∂nψ]|Γ = 0 in L2(Γ) which +follows from the regularity of ψ. This shows (A.7). +Consider the domain partition and its corresponding broken Sobolev space presented in +the premise of Lemma A.2. We define the symmetric bilinear form A that generalizes the +appearence of Ah,t, defined by (4.2), to this setting by +A(w, v) := +N +� +i=1 +(∇w, ∇v)Ωi − (⟨∂nw⟩, [v])Γ − (⟨∂nv⟩, [w])Γ ++ (γh−1 +K [w], [v])Γ + ([∇w], [∇v])ΩO, +(A.10) +where we just let h−1 +K be some spatially dependent function of sufficient regularity and ΩO +be some union of subsets of subdomains. The specifics of h−1 +K and ΩO are of course taken +to be the natural ones when restricting A to Ah,t. By introducing A to Lemma A.2, we +get +(−∆ψ, v)Ω +(A.7) += +N +� +i=1 +(∇ψ, ∇v)Ωi − (⟨∂nψ⟩, [v])Γ += +N +� +i=1 +(∇ψ, ∇v)Ωi − (⟨∂nψ⟩, [v])Γ − (⟨∂nv⟩, [ψ] +���� +=0 +)Γ ++ (γh−1 +K +[ψ] +���� +=0 +, [v])Γ + ([∇ψ] +���� +=0 +, [∇v])ΩO +(A.10) += A(ψ, v), +(A.11) +57 + +where [ψ]|Γ = 0 follows from Sobolev’s inequality, i.e., ψ ∈ C(Ω) for d = 1, 2, 3, and +[∇ψ]|ΩO = 0 since (∇ψ)+ = (∇ψ)− on ΩO for a non-discrete function such as ψ. We +present this result as the following corollary: +Corollary A.1 (Partial integration in broken Sobolev spaces with bilinear forms A). For +d = 1, 2, or 3, let Ω ⊂ Rd be a bounded domain and let Γ ⊂ Ω be a continuous manifold of +codimension 1 that partitions Ω into the subdomains Ω1, · · · , ΩN. For this setting, let the +symmetric bilinear form A be defined by (A.10). For ψ ∈ H2(Ω) and v ∈ H1 +0(Ω1, · · · , ΩN), +we have that +(−∆ψ, v)Ω = A(ψ, v). +(A.12) +Lemma A.3 (A scaled trace inequality for domain partitioning manifolds of codimension +1). For d = 1, 2, or 3, let Ω ⊂ Rd be a bounded domain with diameter L, i.e., L = +diam(Ω) = supx,y∈Ω |x − y|. Let Γ ⊂ Ω be a continuous manifold of codimension 1 that +partitions Ω into N subdomains. Then there exists a constant C > 0 such that +∥v∥2 +Γ ≤ C +� +L−1∥v∥2 +Ω + L∥∇v∥2 +Ω +� +, +∀v ∈ H1(Ω). +(A.13) +Proof. If (A.13) holds for the case N = 2, then that result may be applied repeatedly to +show (A.13) for N > 2. We thus assume that Γ partitions Ω into two subdomains denoted +Ω1 and Ω2 with diameters L1 and L2, respectively. From the regularity assumptions on v, +we have for i = 1, 2, that v ∈ H1(Ωi) and thus +∥v∥2 +Γ ≤ ∥v∥2 +∂Ωi ≤ Ci +� +L−1 +i ∥v∥2 +Ωi + Li∥∇v∥2 +Ωi +� +, +(A.14) +where the first inequality follows from Γ ⊂ ∂Ωi, and the second is a standard scaled trace +inequality. Now consider the sum L1/L + L2/L. We have that +1 ≤ L1 +L + L2 +L ≤ 2, +(A.15) +where the lower bound follows from the triangle type inequality L ≤ L1+L2, and the upper +bound simply follows from Li ≤ L. We are now ready to show the desired inequality. 
The +left-hand side of (A.13) is +∥v∥2 +Γ = (1)∥v∥2 +Γ +(A.15) +≤ +�L1 +L + L2 +L +� +∥v∥2 +Γ = L1 +L ∥v∥2 +Γ + L2 +L ∥v∥2 +Γ +(A.14) +≤ +L1 +L C1 +� +L−1 +1 ∥v∥2 +Ω1 + L1∥∇v∥2 +Ω1 +� ++ L2 +L C2 +� +L−1 +2 ∥v∥2 +Ω2 + L2∥∇v∥2 +Ω2 +� +3rd +≤ C1 +L ∥v∥2 +Ω1 + C1L∥∇v∥2 +Ω1 + C2 +L ∥v∥2 +Ω2 + C2L∥∇v∥2 +Ω2 +≤ max +i {Ci}L−1 +� +∥v∥2 +Ω1 + ∥v∥2 +Ω2 +� ++ max +i {Ci}L +� +∥∇v∥2 +Ω1 + ∥∇v∥2 +Ω2 +� += C +� +L−1∥v∥2 +Ω + L∥∇v∥2 +Ω +� +, +(A.16) +58 + +where we have used that Li ≤ L to obtain the third inequality. This shows (A.13). +Let ΓK = ΓK(t) = K ∩ Γ(t). For t ∈ [0, T], j ∈ {0, G}, a simplex K ∈ Tj,Γ(t) = {K ∈ Tj : +K ∩ Γ(t) ̸= ∅}, and v ∈ H1(K), we have from Lemma A.3 that +∥v∥2 +ΓK ≤ C +� +h−1 +K ∥v∥2 +K + hK∥∇v∥2 +K +� +, +(A.17) +where hK is the diameter of K. For v ∈ P(K), i.e., a polynomial on K, we have the +standard inverse estimate +∥Dk +xv∥2 +K ≤ Ch−2 +K ∥Dk−1 +x +v∥2 +K, +for k ≥ 1. +(A.18) +For v ∈ Vh(t), we thus have +∥Dk +xv∥2 +ΓK +(A.17) +≤ +C +� +h−1 +K ∥Dk +xv∥2 +K + hK∥∇Dk +xv∥2 +K +� +(A.18) +≤ +C +� +h−1 +K ∥Dk +xv∥2 +K + hKCh−2 +K ∥Dk +xv∥2 +K +� += Ch−1 +K ∥Dk +xv∥2 +K, +(A.19) +which we present as the following corollary: +Corollary A.2 (A discrete spatial local inverse inequality for ΓK(t)). For t ∈ [0, T], +j ∈ {0, G}, K ∈ Tj,Γ(t) with diameter hK, let ΓK(t) = K ∩ Γ(t). Then, for k ≥ 0, there +exists a constant C > 0 such that +∥Dk +xv∥2 +ΓK(t) ≤ Ch−1 +K ∥Dk +xv∥2 +K, +∀v ∈ Vh(t). +(A.20) +Lemma A.4 (A discrete spatial inverse inequality for Γ(t)). Let the mesh-dependent norm +∥ · ∥−1/2,h,Γ(t) be defined by (4.7). Then, for t ∈ [0, T], there exists a constant CI > 0, +independent of h, such that +∥⟨∂¯nxv⟩∥2 +−1/2,h,Γ(t) ≤ CI +� +2 +� +i=1 +∥∇v∥2 +Ωi(t) + ∥[∇v]∥2 +ΩO(t) +� +, +∀v ∈ Vh(t). +(A.21) +Proof. To lighten the notation we omit the time dependence, which has no importance +here anyways. We follow the proof of the corresponding inequality in [2] with some modi- +fications. We use index j ∈ {0, G}, such that, if j = 0, then i = 1 and if j = G, then i = 2, +and let ΓKj = Kj ∩ Γ and Tj,Γ = {Kj ∈ Tj : Kj ∩ Γ ̸= ∅}. Note that for i = 1, 2, +� +K0∈T0,Γ +hK0∥vi∥2 +ΓK0 ≤ h +� +K0∈T0,Γ +∥vi∥2 +ΓK0 = h +� +KG∈TG,Γ +∥vi∥2 +ΓKG +≤ C +� +KG∈TG,Γ +hKG∥vi∥2 +ΓKG, +(A.22) +59 + +where we have used that ∪K0∈T0,ΓΓK0 = Γ = ∪KG∈TG,ΓΓKG to obtain the identity, and the +quasi-uniformity of T0 and TG to obtain the last inequality. Using the norm definition and +recalling that ⟨v⟩ = ω1v1 + ω2v2, the left-hand side of (A.21) is +∥⟨∂¯nxv⟩∥2 +−1/2,h,Γ = +� +K0∈T0,Γ +hK0∥⟨∂¯nxv⟩∥2 +ΓK0 +≤ +� +K0∈T0,Γ +2hK0∥ω1(∂¯nxv)1∥2 +ΓK0 + +� +K0∈T0,Γ +2hK0∥ω2(∂¯nxv)2∥2 +ΓK0 +(A.22) +≤ +� +K0∈T0,Γ +2hK0∥ω1(∂¯nxv)1∥2 +ΓK0 + C +� +KG∈TG,Γ +2hKG∥ω2(∂¯nxv)2∥2 +ΓKG. +(A.23) +Since ∂¯nxv = ¯nx · ∇v, we have +∥ωi(∂¯nxv)i∥2 +ΓKj ≤ +� +ΓKj +|ωi|2|¯nx|2 +� +�� +� +≤1 +|(∇v)i|2 ds ≤ ∥(∇v)i∥2 +ΓKj +(A.20) +≤ +Ch−1 +Kj∥(∇v)i∥2 +Kj. +(A.24) +Using (A.24) in (A.23), we get +∥⟨∂¯nxv⟩∥2 +−1/2,h,Γ ≤ +� +K0∈T0,Γ +2hK0Ch−1 +K0∥∇v∥2 +K0 + C +� +KG∈TG,Γ +2hKGCh−1 +KG∥∇v∥2 +KG +≤ C +� +K0∈T0,Γ +∥∇v∥2 +K0 + C +� +KG∈TG,Γ +∥∇v∥2 +KG += C +� +K0∈T0,Γ +� +∥∇v∥2 +K0∩Ω1 + ∥(∇v)1∥2 +K0∩Ω2 +� ++ C +� +KG∈TG,Γ +∥∇v∥2 +KG +≤ C∥∇v∥2 +Ω1 + C∥(∇v)1∥2 +ΩO + C∥∇v∥2 +Ω2 +≤ C +2 +� +i=1 +∥∇v∥2 +Ωi + C∥(∇v)1 ± (∇v)2∥2 +ΩO +≤ C +2 +� +i=1 +∥∇v∥2 +Ωi + C +� +∥[∇v]∥2 +ΩO + ∥(∇v)2∥2 +ΩO +� +≤ CI +� +2 +� +i=1 +∥∇v∥2 +Ωi + ∥[∇v]∥2 +ΩO +� +, +(A.25) +which is the desired estimate. 
+60 + +B +Interpolation +B.1 +Spatial interpolation operator +For the definition of the spatial interpolation operator, we recall the semi-discrete spaces +Vh,0 and Vh,G, defined by (3.7) and (3.8), respectively. We define the spatial interpolation +operators πh,0 : L1(Ω0) → Vh,0 and πh,G : L1(G) → Vh,G to be the Scott-Zhang interpolation +operators for the spaces Vh,0 and Vh,G, respectively, where the defining integrals are taken +over entire simplices. We point out that πh,G = πh,G(t), i.e., it is time-dependent, since G +is allowed to move around, but we omit the t to lighten the notation. For t ∈ [0, T], we +define the spatial interpolation operator Ih,t : L1(Ω0) → Vh(t) by +Ih,tv|Ω1(t) := πh,0v|Ω1(t), +Ih,tv|Ω2(t) := πh,Gv|Ω2(t). +(B.1) +The operator Ih,t is used in the proofs of Lemma 4.2 and Lemma 4.4, where energy estimates +of its interpolation error is used. We present and prove these estimates in two lemmas +below. +Lemma B.1 (An interpolation error estimate in |||·|||Ah,t). Let |||·|||Ah,t and Ih,t be defined +by (4.10) and (B.1), respectively. Then there exists a constant C > 0 such that +|||v − Ih,tv|||Ah,t ≤ Chp∥Dp+1 +x +v∥Ω0, +∀v ∈ Hp+1(Ω0). +(B.2) +Proof. To lighten the notation we omit the time dependence, which has no importance +here anyways. Letting w = v − Ih,tv, and using the definition of |||·|||Ah,t, the square of the +left-hand side of (B.2) is +|||w|||2 +Ah,t = +2 +� +i=1 +∥∇w∥2 +Ωi +� �� � += I ++ ∥⟨∂nw⟩∥2 +−1/2,h,Γ +� +�� +� += II ++ ∥[w]∥2 +1/2,h,Γ +� +�� +� += III ++ ∥[∇w]∥2 +ΩO +� +�� +� += IV +. +(B.3) +Letting wj = v − πh,jv, we treat each term in (B.3) separately, starting with the first: +I = ∥∇w∥2 +Ωi ≤ +� +K∈Tj,Ωi +∥∇wj∥2 +K +(B.4) +where we have expanded the spatial integration domain by going from Ωi to all simplices +61 + +in Tj that are cut by Ωi. The second term is +II = ∥⟨∂nw⟩∥2 +−1/2,h,Γ = +� +K0∈T0,Γ +hK0∥⟨∂nw⟩∥2 +ΓK0 +≤ +� +K0∈T0,Γ +2hK0∥ω1(∂nv)1∥2 +ΓK0 + +� +K0∈T0,Γ +2hK0∥ω2(∂nv)2∥2 +ΓK0 +(A.22) +≤ +� +K0∈T0,Γ +2hK0∥ω1(∂nv)1∥2 +ΓK0 + C +� +KG∈TG,Γ +2hKG∥ω2(∂nv)2∥2 +ΓKG +≤ C +2 +� +i=1 +� +Kj∈Tj,Γ +hKj∥ωi(∂nw)i∥2 +ΓKj +(A.24) +≤ +C +2 +� +i=1 +� +Kj∈Tj,Γ +hKj∥(∇w)i∥2 +ΓKj +(A.17) +≤ +C +2 +� +i=1 +� +Kj∈Tj,Γ +hKjC +� +h−1 +Kj∥∇wj∥2 +Kj + hKj∥D2 +xwj∥2 +Kj +� +≤ C +2 +� +i=1 +� +Kj∈Tj,Γ +� +∥∇wj∥2 +Kj + h2 +Kj∥D2 +xwj∥2 +Kj +� +≤ C +2 +� +i=1 +� +K∈Tj,Ωi +� +∥∇wj∥2 +K + h2 +K∥D2 +xwj∥2 +K +� +. +(B.5) +The third term in (B.3) receives the same treatment, and thus +III = ∥[w]∥2 +1/2,h,Γ = +� +K0∈T0,Γ +h−1 +K0∥[w]∥2 +ΓK0 +≤ +� +K0∈T0,Γ +2h−1 +K0∥w1∥2 +ΓK0 + +� +K0∈T0,Γ +2h−1 +K0∥w2∥2 +ΓK0 +(A.22) +≤ +� +K0∈T0,Γ +2h−1 +K0∥w1∥2 +ΓK0 + C +� +KG∈TG,Γ +2h−1 +KG∥w2∥2 +ΓKG +≤ C +2 +� +i=1 +� +Kj∈Tj,Γ +h−1 +Kj∥wi∥2 +ΓKj +(A.17) +≤ +C +2 +� +i=1 +� +Kj∈Tj,Γ +h−1 +KjC +� +h−1 +Kj∥wj∥2 +Kj + hKj∥∇wj∥2 +Kj +� +≤ C +2 +� +i=1 +� +Kj∈Tj,Γ +� +h−2 +Kj∥wj∥2 +Kj + ∥∇wj∥2 +Kj +� +≤ C +2 +� +i=1 +� +Kj∈Tj,Ωi +� +h−2 +K ∥wj∥2 +K + ∥∇wj∥2 +K +� +. +(B.6) +62 + +The fourth term in (B.3) is +IV = ∥[∇w]∥2 +ΩO = 2∥(∇w)1∥2 +ΩO + 2∥(∇w)2∥2 +ΩO = C +2 +� +i=1 +∥(∇w)i∥2 +ΩO += C +2 +� +i=1 +� +K∈T0,Γ +∥∇wj∥2 +K∩Ω2 ≤ C +2 +� +i=1 +� +K∈T0,Γ +∥∇wj∥2 +K ≤ C +2 +� +i=1 +� +K∈Tj,Ωi +∥∇wj∥2 +K. +(B.7) +We are now done with the separate treatments of all the terms in (B.3). 
Summing up what +we have, i.e., using (B.4)–(B.7) in (B.3), we get +|||w|||2 +Ah,t ≤ +2 +� +i=1 +� +K∈Tj,Ωi +∥∇wj∥2 +K +� +�� +� +≥ I ++ C +2 +� +i=1 +� +K∈Tj,Ωi +� +∥∇wj∥2 +K + h2 +K∥D2 +xwj∥2 +K +� +� +�� +� +≥ II ++ C +2 +� +i=1 +� +K∈Tj,Ωi +� +h−2 +K ∥wj∥2 +K + ∥∇wj∥2 +K +� +� +�� +� +≥ III ++ C +2 +� +i=1 +� +K∈Tj,Ωi +∥∇wj∥2 +K +� +�� +� +≥ IV +≤ C +2 +� +i=1 +� +K∈Tj,Ωi +� +h−2 +K ∥wj∥2 +K + ∥∇wj∥2 +K + h2 +K∥D2 +xwj∥2 +K +� += C +2 +� +i=1 +� +K∈Tj,Ωi +� +h−2 +K ∥v − πh,jv∥2 +K + ∥∇(v − πh,jv)∥2 +K ++ h2 +K∥D2 +x(v − πh,jv)∥2 +K +� +4th +≤ C +2 +� +i=1 +� +K∈Tj,Ωi +� +h−2 +K +� +h2(p+1)∥Dp+1 +x +v∥2 +N(K) +� ++ +� +h2p∥Dp+1 +x +v∥2 +N(K) +� ++ h2 +K +� +h2(p−1)∥Dp+1 +x +v∥2 +N(K) +�� +≤ Ch2p +2 +� +i=1 +� +K∈Tj,Ωi +∥Dp+1 +x +v∥2 +N(K) +≤ Ch2p +2 +� +i=1 +∥Dp+1 +x +v∥2 +Ω0 = Ch2p∥Dp+1 +x +v∥2 +Ω0, +(B.8) +where we have used standard local interpolation error estimates for Scott-Zhang interpo- +lation operators in the fourth step, thus N(K) denotes the neighborhood of simplex K, +i.e., all adjacent simplices to and including K. Taking the square root of both sides gives +(B.2). +63 + +Lemma B.2 (An interpolation error estimate in |||·|||An). For n = 1, . . . , N, let |||·|||An and +Ih,n = Ih,tn be defined by (4.32) and (B.1), respectively. Then there exists a constant C > 0 +such that +|||v − Ih,nv|||An ≤ Chp∥Dp+1 +x +v∥Ω0, +∀v ∈ Hp+1(Ω0). +(B.9) +Proof. Letting w = v − Ih,nv, and plugging w into |||·|||2 +An, we have +|||w|||2 +An = |||w|||2 +An + ∥⟨∂nw⟩∥2 +−1/2,h,Γn−1 += |||w|||2 +An + ∥⟨∂nw⟩∥2 +−1/2,h,Γn−1∩Γn + ∥⟨∂nw⟩∥2 +−1/2,h,Γn−1\Γn +≤ |||w|||2 +An + |||w|||2 +An + ∥⟨∂nw⟩∥2 +−1/2,h,Γn−1\Γn += C |||w|||2 +An + ∥⟨∂nw⟩∥2 +−1/2,h,Γn−1\Γn +(B.10) +The second term in the last row is initially treated in the same way as its counterpart in +(4.39). We thus partition Γn−1 \ Γn into `Γi := (Γn−1 \ Γn) ∩ Ωi,n, use the interdependent +indices i and j, and write `ΓiKj = Kj ∩ `Γi. Letting wj = v − πh,jv, we have +∥⟨∂nw⟩∥2 +−1/2,h,Γn−1\Γn +(4.39) +≤ C +� +`ΓiKj +hKj +� +∥(∇wj)+∥2 +`ΓiKj + ∥(∇wj)−∥2 +`ΓiKj +� += C +� +`ΓiKj +� +σ∈{+,−} +hKj∥(∇wj)σ∥2 +`ΓiKj +(A.17) +≤ +C +� +`ΓiKj +� +σ∈{+,−} +hKjC +� +h−1 +Kσ +j ∥∇wj∥2 +Kσ +j + hKσ +j ∥D2 +xwj∥2 +Kσ +j +� +≤ C +� +`ΓiKj +� +σ∈{+,−} +� +∥∇wj∥2 +Kσ +j + h2 +Kσ +j ∥D2 +xwj∥2 +Kσ +j +� +≤ C +2 +� +i=1 +� +K∈Tj,Ωi,n +� +∥∇wj∥2 +K + h2 +K∥D2 +xwj∥2 +K +� +(B.8) +≤ Ch2p∥Dp+1 +x +v∥2 +Ω0. +(B.11) +We thus have +|||w|||2 +An +(B.10) +≤ +C |||w|||2 +An + ∥⟨∂nw⟩∥2 +−1/2,h,Γn−1\Γn +(B.11) +≤ +C |||w|||2 +An + Ch2p∥Dp+1 +x +v∥2 +Ω0 += C |||v − Ih,nv|||2 +An + Ch2p∥Dp+1 +x +v∥2 +Ω0 +(B.2) +≤ Ch2p∥Dp+1 +x +v∥2 +Ω0 + Ch2p∥Dp+1 +x +v∥2 +Ω0 = Ch2p∥Dp+1 +x +v∥2 +Ω0, +(B.12) +which shows (B.9). +64 + +B.2 +Temporal interpolation operator +For q ∈ N and n = 1, . . . , N, we define the temporal interpolation operator ˜In = ˜In,q : +C(In) → Pq(In) by +(˜Inv)− +n = v− +n , +(B.13a) +and with the additional condition for q ≥ 1, +� +In +˜Invw dt = +� +In +vw dt, +∀w ∈ Pq−1(In). +(B.13b) +Lemma B.3 (An interpolation error estimate in ∥ · ∥Ω0,In). Let ˜In be defined by (B.13). +Then, for q = 0, 1, there exists a constant C > 0 such that for any function v : Ω0×In → R +with sufficient spatial and temporal regularity we have that ˜In is bounded and that +∥v − ˜Inv∥Ω0,In ≤ Ckq+1 +n +∥˙v(q+1)∥Ω0,In, +(B.14) +where ∥v∥Ω0,In = maxt∈In ∥v∥Ω0, kn = tn − tn−1, and ˙v(q+1) = ∂q+1v/∂tq+1. +Proof. We start by deriving explicit expressions for ˜Inv, involving w, for q = 0, 1. From +these explicit expressions, boundedness of ˜In will follow. 
We then use these expressions to +derive estimates for v − ˜Inv, from which (B.14) will be derived. +Case q = 0 +For q = 0, and any (x, t) ∈ Ω0 × In, +(˜Inv)(x, t) = v(x, t− +n ), +(B.15) +from (B.13a). The identity (B.15) indicates that ˜In is bounded for q = 0. This can easily +be seen by, e.g., assuming v to be continuous in time on In. Using (B.15), we have for any +(x, t) ∈ Ω0 × In +(v − ˜Inv)(x, t) = v(x, t) − v(x, t− +n ) = − +� tn +t +˙v(x, s) ds ≤ +� +In +|˙v(x, t)| dt. +(B.16) +By taking the squared L2(Ωi,n)-norm of v − ˜Inv, we obtain for any t ∈ In that +∥v − ˜Inv∥2 +Ω0 = +� +Ω0 +|(v − ˜Inv)(x, t)|2 dx +(B.16) +≤ +� +Ω0 +���� +� +In +|˙v(x, t)| dt +���� +2 +dx +≤ +� +Ω0 +kn +� +In +|˙v(x, t)|2 dt dx = kn +� +In +∥˙v∥2 +Ω0 dt +≤ k2 +n∥˙v∥2 +Ω0,In. +(B.17) +Taking the square root of both sides of (B.17) and the maximum over In of the left-hand +side, since (B.17) holds for all t ∈ In, proves (B.14) for q = 0. +65 + +Case q = 1 +For q = 1, the procedure is a little bit trickier. We start by considering the following +integral for any x ∈ Ω0: +� +In +(t − tn)∂t(˜Inv)(x, t) dt = ∂t(˜Inv)(x, t− +n ) +� +In +(t − tn) dt = −1 +2k2 +n∂t(˜Inv)(x, t− +n ), +(B.18) +where we have used the fact that ∂t(˜Inv) is constant in time on In for q = 1. We may also +use this fact to treat the integral as: +� +In +(t − tn)∂t(˜Inv)(x, t) dt = +� +In +(t − tn)(˜Inw)(x, t) − (˜Inv)(x, t− +n ) +t − tn +dt += +� +In +(˜Inv)(x, t) dt − +� +In +(˜Inv)(x, t− +n ) dt += +� +In +v(x, t) dt − +� +In +v(x, t− +n ) dt += +� +In +v(x, t) − v(x, t− +n ) dt, +(B.19) +where we have used (B.13) to obtain the last equality. By Taylor expansion in time of ˜Inv +at t− +n , we have for any (x, t) ∈ Ω0 × In +(˜Inv)(x, t) = (˜Inv)(x, t− +n ) + (t − tn)∂t(˜Inv)(x, t− +n ) += v(x, t− +n ) − 2(t − tn) +k2 +n +� +In +v(x, t) − v(x, t− +n ) dt, +(B.20) +where we have used (B.13a), and combined (B.18) with (B.19) to obtain the last equality. +The identity (B.20) indicates that ˜In is bounded for q = 1. This can be seen by, e.g., +assuming v to be continuously differentiable in time on In. Using (B.20), we have for any +(x, t) ∈ Ω0 × In that +(v − ˜Inv)(x, t) = v(x, t) − v(x, t− +n ) +� +�� +� +=I ++ 2(t − tn) +k2 +n +� +In +v(x, t) − v(x, t− +n ) dt +� +�� +� +=II +. +(B.21) +We consider the terms separately, starting with the first: +I = v(x, t) − v(x, t− +n ) = − +� tn +t +˙v(x, s) ds += +� tn +t +(s − t)∂2 +sv(x, s) ds − +� +(s − t)˙v(x, s) +�tn +t += +� tn +t +(s − t)∂2 +sv(x, s) ds − (tn − t)˙v(x, t− +n ). +(B.22) +66 + +The second term in (B.21) is +II = 2(t − tn) +k2 +n +� +In +v(x, t) − v(x, t− +n ) dt += 2(t − tn) +k2 +n +� � +(t − tn−1)(v(x, t) − v(x, t− +n )) +�tn +tn−1 +� +�� +� +=0 +− +� +In +(t − tn−1)∂t(v(x, t) − v(x, t− +n )) dt +� += 2(tn − t) +k2 +n +� +In +(t − tn−1)˙v(x, t) dt += 2(tn − t) +k2 +n +��(t − tn−1)2 +2 +˙v(x, t) +�tn +tn−1 +− +� +In +(t − tn−1)2 +2 +∂2 +t v(x, t) dt +� += 2(tn − t) +k2 +n +k2 +n +2 ˙v(x, t− +n ) − 2(tn − t) +k2 +n +� +In +(t − tn−1)2 +2 +∂2 +t v(x, t) dt += (tn − t)˙v(x, t− +n ) − (tn − t) +k2 +n +� +In +(t − tn−1)2∂2 +t v(x, t) dt. 
+(B.23) +Using the identities (B.22) and (B.23) in (B.21), we have +(v − ˜Inv)(x, t) = +� tn +t +(s − t)∂2 +sv(x, s) ds −(tn − t)˙v(x, t− +n ) + (tn − t)˙v(x, t− +n ) +� +�� +� +=0 +− (tn − t) +k2 +n +� +In +(t − tn−1)2∂2 +t v(x, t) dt += +� tn +t +(s − t)∂2 +sv(x, s) ds − (tn − t) +k2 +n +� +In +(t − tn−1)2∂2 +t v(x, t) dt +≤ +� tn +t +|s − t||∂2 +sv(x, s)| ds + |tn − t| +k2 +n +� +In +|t − tn−1|2|∂2 +t v(x, t)| dt +≤ kn +� +In +|∂2 +sv(x, s)| ds + kn +� +In +|∂2 +t v(x, t)| dt += 2kn +� +In +|∂2 +t v(x, t)| dt. +(B.24) +By taking the squared L2(Ωi,n)-norm of v − ˜Inv, we obtain for any t ∈ In that +∥v − ˜Inv∥2 +Ω0 = +� +Ω0 +|(v − ˜Inv)(x, t)|2 dx +(B.24) +≤ +� +Ω0 +����2kn +� +In +|∂2 +t v(x, t)| dt +���� +2 +dx +≤ +� +Ω0 +4k2 +nkn +� +In +|∂2 +t v(x, t)|2 dt dx = 4k3 +n +� +In +∥˙v(2)∥2 +Ω0 dt +≤ 4k4 +n∥˙v(2)∥2 +Ω0,In. +(B.25) +67 + +Taking the square root of both sides of (B.25) and the maximum over In of the left-hand +side, since (B.25) holds for all t ∈ In, proves (B.14) for q = 1. The proof of Lemma B.3 is +thus complete. +References +[1] J. Nitsche, “¨Uber ein Variationsprinzip zur L¨osung von Dirichlet-Problemen bei Ver- +wendung von Teilr¨aumen, die keinen Randbedingungen unterworfen sind,” in Abhand- +lungen aus dem Mathematischen Seminar der Universit¨at Hamburg, vol. 36. Springer, +1971, pp. 9–15. +[2] A. Hansbo and P. Hansbo, “An unfitted finite element method, based on Nitsche’s +method, for elliptic interface problems,” Comp. Methods Appl. Mech. Engrg., vol. 191, +no. 47, pp. 5537–5552, 2002. +[3] A. Hansbo, P. Hansbo, and M. G. Larson, “A finite element method on composite grids +based on Nitsche’s method,” ESAIM, Math. Model. Numer. Anal., vol. 37, no. 03, pp. +495–514, 2003. +[4] E. Burman and M. A. Fern´andez, “Stabilized explicit coupling for fluid-structure +interaction using Nitsche’s method,” C. R. Math. Acad. Sci. Paris, vol. 345, no. 8, +pp. 467–472, 2007. +[5] E. Burman and P. Hansbo, “A unified stabilized method for Stokes’ and Darcy’s +equations,” J. Comput. Appl. Math., vol. 198, no. 1, pp. 35–51, 2007. +[6] R. Becker, E. Burman, and P. Hansbo, “A Nitsche extended finite element method for +incompressible elasticity with discontinuous modulus of elasticity,” Comp. Methods +Appl. Mech. Engrg., vol. 198, no. 41, pp. 3352–3360, 2009. +[7] A. Massing, M. G. Larson, A. Logg, and M. E. Rognes, “A Stabilized Nitsche Fictitious +Domain Method for the Stokes Problem,” Journal of Scientific Computing, vol. 61, +no. 3, pp. 604–628, 2014. +[8] ——, “A stabilized Nitsche overlapping mesh method for the Stokes problem,” Nu- +merische Mathematik, vol. 128, no. 1, pp. 73–101, 2014. +[9] A. Johansson, +M. G. Larson, +and A. Logg, +“High order cut finite element +methods +for +the +Stokes +problem,” +Advanced +Modeling +and +Simulation +in +Engineering +Sciences, +vol. +2, +no. +1, +pp. +1–23, +2015. +[Online]. +Available: +http://dx.doi.org/10.1186/s40323-015-0043-7 +[10] J. S. Dokken, S. W. Funke, A. Johansson, and S. Schmidt, “Shape optimization using +the finite element method on multiple meshes with nitsche coupling,” SIAM Journal +on Scientific Computing, vol. 41, no. 3, pp. A1923–A1948, 2019. +68 + +[11] A. Johansson, B. Kehlet, M. G. Larson, and A. Logg, “Multimesh finite element meth- +ods: Solving PDEs on multiple intersecting meshes,” Computer Methods in Applied +Mechanics and Engineering, 2019. +[12] K. Eriksson and C. Johnson, “ADAPTIVE FINITE ELEMENT METHODS FOR +PARABOLIC PROBLEMS I: A LINEAR MODEL PROBLEM,” SIAM Journal on +Numerical Analysis, vol. 28, no. 
1, pp. 43–77, 1991. +[13] ——, “Adaptive Finite Element Methods for Parabolic Problems II: Optimal Error +Estimates in L∞L2 and L∞L∞,” SIAM Journal on Numerical Analysis, vol. 32, no. 3, +pp. 706–740, 1995. +[14] C. Lundholm, “A space-time cut finite element method for a time-dependent parabolic +model problem,” MSc thesis, Chalmers University of Technology and University of +Gothenburg, 2015. +69 + diff --git a/ZNFRT4oBgHgl3EQfPjcf/content/tmp_files/load_file.txt b/ZNFRT4oBgHgl3EQfPjcf/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..395108f825572b3e9a811bd4a7d163bae2b94e53 --- /dev/null +++ b/ZNFRT4oBgHgl3EQfPjcf/content/tmp_files/load_file.txt @@ -0,0 +1,3103 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf,len=3102 +page_content='A cut finite element method for the heat equation on overlapping meshes: L2-analysis for dG(0) mesh movement Mats G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' Larson, Carl Lundholm Abstract We present a cut finite element method for the heat equation on two overlapping meshes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' By overlapping meshes we mean a mesh hierarchy with a stationary back- ground mesh at the bottom and an overlapping mesh that is allowed to move around on top of the background mesh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' Overlapping meshes can be used as an alternative to costly remeshing for problems with changing or evolving interior geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' In this paper the overlapping mesh is prescribed a dG(0) movement, meaning that its loca- tion as a function of time is discontinuous and piecewise constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' For the discrete function space, we use continuous Galerkin in space and discontinuous Galerkin in time, with the addition of a discontinuity on the boundary between the two meshes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' The finite element formulation is based on Nitsche’s method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' The dG(0) mesh move- ment results in a space-time discretization with a nice product structure between space and time which allows for existing analysis methodologies to be applied with only minor modifications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' We follow the analysis methodology presented by Eriksson and Johnson in [12, 13], here referred to as an L2-analysis because of the norm used in the error analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' The greatest modification is the use of a shift operator that generalizes the Ritz projection operator.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'}
page_content=' The shift operator is used to handle the shift in the overlapping mesh’s location at discrete times.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'}
page_content=' The L2-analysis consists of the corresponding standard stability estimates and a priori error estimate that is of optimal order with respect to both time step and mesh size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'}
page_content=' We also present numerical results for a problem in one spatial dimension that verify the analytic error convergence orders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'}
page_content=' Keywords: CutFEM, overlapping meshes, moving meshes, parabolic problem, error analysis 1 arXiv:2301.13517v1 [math.NA] 31 Jan 2023' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'}
page_content=' Contents 1 Introduction 3 2 Problem 5 3 Method 5 3.1 Preliminaries 3.2 Finite element spaces' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'}
page_content=' .'
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 7 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 The semi-discrete spaces Vh(t) and Vh(In) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 7 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 The fully discrete spaces V n h and Vh .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 8 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 Finite element formulation .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 10 4 Analytic preliminaries 10 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 The bilinear form Ah,t .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 10 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 Standard operators that map to Vh(t) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 13 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 Shift operator .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 14 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 The bilinear form Bh .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 19 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 Consistency and Galerkin orthogonality .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 21 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 A discrete dual problem .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' 
Table of contents (2301.13517v1.pdf, source: /home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf):
5 Stability analysis 23
5.1 The basic stability estimate 24
5.2 The strong stability estimate 26
5.3 Proof of Theorem 5.1 (The main stability estimate) 34
6 A priori error analysis 36
7 Numerical results 43
7.1 Illustrative examples 43
7.2 Convergence study 45
7.3 dG(0) in time 46
7.3.1 Illustrative examples 46
7.3.2 Convergence study 48
7.4 dG(1) in time 51
7.4.1 Illustrative examples 51
7.4.2 Convergence study 53
7.5 Comparison with analytic results 55
8 Conclusions 55
A Analytic tools 56
B Interpolation 61
B.1 Spatial interpolation operator 61
B.2 Temporal interpolation operator
1 Introduction

The finite element method (FEM) is a well-known tool for computing approximate solutions of partial differential equations (PDEs). It is particularly suitable for PDE-problems with complicated geometry since it allows for unstructured domain-fitted meshes. Unstructured meshes are more computationally expensive to generate and more memory-demanding to store than structured meshes, since there is no underlying structure that may be exploited. Cut finite element methods (CutFEMs) enable the use of structured meshes in problems with complicated geometry. CutFEM may also make costly remeshing redundant for problems with changing or evolving geometries, or for other situations involving meshing such as adaptive mesh refinement. Using standard FEM for such problems usually means that a new mesh has to be generated when the geometry has changed too much. With CutFEM the geometry may be represented by an interface whose location in relation to the mesh may be arbitrary, thus allowing the same mesh to be used for different or changing interfaces.

A common type of problem with changing geometry is one where an object in the solution domain moves relative to the domain boundary. An advantageous CutFEM approach to such problems is to use overlapping meshes, meaning two or more meshes ordered in a mesh hierarchy. This is also called composite grids/meshes and multimesh in the literature, but the meaning is the same. The idea is to first remove the object from the domain and to generate a stationary background mesh in the empty solution domain. The background mesh may thus be a nicely structured mesh. A second mesh is then generated around the object.
The mesh containing the object is then placed "on top" of the background mesh, creating a mesh hierarchy. The movement of the object will thus also cause its encapsulating mesh to move.

Over the past two decades, a theoretical foundation for the formulation of stabilized CutFEM has been developed by extending the ideas of Nitsche, presented in [1], to a general weak formulation of the interface conditions, thereby removing the need for domain-fitted meshes. The foundations of CutFEM were presented in [2] and then extended to overlapping meshes in [3]. The CutFEM methodology has since been developed and applied to a number of important multiphysics problems; see for example [4, 5, 6, 7]. For CutFEM on overlapping meshes in particular, see for example [8, 9, 10, 11]. So far, only CutFEM for stationary PDE-problems on overlapping meshes has been developed and analysed to a satisfactory degree, thus leaving analogous work for time-dependent PDE-problems to be desired.

The work presented here is intended to be an initial part of developing CutFEM for time-dependent PDE-problems on overlapping meshes. We consider CutFEM for the heat equation on two overlapping meshes: one stationary background mesh and one moving overlapping mesh. Depending on how the mesh movement is represented, quite different space-time discretizations may arise, allowing for different types of analyses of the CutFEM. In general the mesh movement may be either continuous or discontinuous. We have considered the simplest case of each of these two types, which we refer to as cG(1) and dG(0) mesh movement, where cG(r) and dG(r) stand for continuous and discontinuous Galerkin of order r, respectively.
The mesh movements are named after the type of function that the location of the overlapping mesh traces out when considered as a function of time. Thus cG(1) mesh movement means that the location of the overlapping mesh as a function of time is continuous and piecewise linear, and dG(0) mesh movement means that it is discontinuous and piecewise constant.

In a very first study, we considered cG(1) mesh movement and attempted to follow the analysis methodology presented by Eriksson and Johnson in [12, 13], here referred to as an L2-analysis because of the norm used in the error analysis. However, due to the space-time discretization resulting from the cG(1) mesh movement, the L2-analysis failed. The study, containing partial results of the incomplete L2-analysis, was presented in the MSc-thesis [14]. With that very first study as a starting point, we retreated in two directions by considering a less demanding energy analysis and the simpler dG(0) mesh movement, meaning a less complicated space-time discretization. This has resulted in two new studies with complete analyses. One study concerns an energy analysis for cG(1) mesh movement, and the other concerns an L2-analysis for dG(0) mesh movement. This paper presents the latter. Table 1 gives an overview of the various studies of CutFEM for the heat equation on two overlapping meshes performed so far.

                        dG(0) mesh movement    cG(1) mesh movement
    Energy analysis                            ✓
    L2-analysis         This paper ✓           MSc-thesis [14] ✗

Table 1: Overview of studies of CutFEM for the heat equation on two overlapping meshes based on analysis and mesh movement type. The checkmark indicates a complete analysis and the x-mark one that is currently incomplete.
In this paper, the overlapping mesh is prescribed a dG(0) movement. This results in a discretization that has a product structure between space and time in each slab. Standard analysis methodology therefore works with some modifications. We follow the L2-analysis presented by Eriksson and Johnson in [12, 13]. The main modification to the standard analysis is the use of a shift operator that generalizes the Ritz projection operator. The shift operator is used to handle the shift in the overlapping mesh's location at discrete times. The general analysis consists of stability and error estimates. The error analysis concerns an optimal order a priori error estimate of the L2-norm of the approximation error at the final time. This estimate shows that the method preserves the so-called superconvergence of the error with respect to the time step.

The outline of the rest of this manuscript is as follows. In Section 2, the original PDE-problem is formulated. In Section 3, the corresponding CutFEM is presented. In Section 4, necessary tools for the analysis are presented, such as bilinear forms and operators. In Section 5, we present and prove basic and strong stability for the finite element solution. In Section 6, we present and prove an optimal order a priori error estimate. In Section 7, we present numerical results for a problem in one spatial dimension that verify the analytic convergence orders of the approximation error.
The last part of this manuscript is an appendix where we present tools used in the analysis.

2 Problem

For d = 1, 2, or 3, let Ω0 ⊂ R^d be a bounded convex domain, i.e., connected open set, with polygonal boundary ∂Ω0. Let T > 0 be a given final time. Let x ∈ R^d denote the spatial coordinate and t ∈ R denote time. Furthermore, let G ⊂ Ω0 ⊂ R^d be another bounded domain with polygonal boundary ∂G. We let the location of G be time-dependent by prescribing for G a velocity µ : [0, T] → R^d. This means that G and ∂G are functions of time, i.e., G = G(t) and ∂G = ∂G(t) for t ∈ [0, T]. We point out that the shape of G remains the same for all times. From Ω0 and G, we define the following two domains:

\[
\Omega_1 := \Omega_0 \setminus (G \cup \partial G), \qquad (2.1)
\]
\[
\Omega_2 := \Omega_0 \cap G, \qquad (2.2)
\]

with boundaries ∂Ω1 and ∂Ω2, respectively.

[Figure 1: Partition of Ω0 into Ω1 (blue) and Ω2 (red) for d = 2, with G moving with velocity µ.]

For i = 1, 2, the set Ωi and its boundary ∂Ωi are functions of time, i.e.,
Ωi = Ωi(t) and ∂Ωi = ∂Ωi(t) for t ∈ [0, T]. Let the common boundary between Ω1 and Ω2 be

\[
\Gamma := \partial\Omega_1 \cap \partial\Omega_2, \qquad (2.3)
\]

where, of course, Γ also is a function of time, i.e., Γ = Γ(t) for t ∈ [0, T]. Note that for any t ∈ [0, T], we have the partition

\[
\Omega_0 = \Omega_1(t) \cup \Gamma(t) \cup \Omega_2(t). \qquad (2.4)
\]

An illustration of the partition of Ω0 as a result of G's location is shown in Figure 1.

We consider the heat equation in Ω0 × (0, T] with given source function f, homogeneous Dirichlet boundary conditions, and initial value u0. The problem is: Find u ∈ H1((0, T], L2(Ω0)) ∩ L2((0, T], H2(Ω0) ∩ H1_0(Ω0)) such that

\[
\begin{cases}
\dot u - \Delta u = f & \text{in } \Omega_0 \times (0, T], \\
u = 0 & \text{on } \partial\Omega_0 \times (0, T], \\
u = u_0 & \text{in } \Omega_0 \times \{0\},
\end{cases}
\qquad (2.5)
\]

where u̇ = ∂t u, ∆ is the Laplace operator, the source function f ∈ L2((0, T], Ω0), and the initial data u0 ∈ H2(Ω0) ∩ H1_0(Ω0).
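As a small illustrative aside (not from the paper): a manufactured solution of (2.5) in one spatial dimension with Ω0 = (0, 1). Choosing u and computing f = u̇ − ∆u yields a problem with a known exact solution; this is the usual way analytic convergence orders, such as those verified numerically in Section 7, are checked. The particular u below is an arbitrary choice made only for this sketch.

```python
import sympy as sp

# Manufactured solution for the model problem (2.5) on Omega_0 = (0, 1), d = 1.
# The choice of u here is an assumption made only for this illustration.
x, t = sp.symbols("x t")
u = sp.exp(-t) * sp.sin(sp.pi * x)             # satisfies u = 0 at x = 0 and x = 1
f = sp.simplify(sp.diff(u, t) - sp.diff(u, x, 2))
u0 = u.subs(t, 0)                              # initial value u0(x) = sin(pi*x)
print(f)                                       # (pi**2 - 1)*exp(-t)*sin(pi*x)
```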
3 Method

3.1 Preliminaries

Let T0 and TG be quasi-uniform simplicial meshes of Ω0 and G, respectively. We denote by hK the diameter of a simplex K. We partition the time interval (0, T] quasi-uniformly into N subintervals In = (tn−1, tn] of length kn = tn − tn−1, where 0 = t0 < t1 < · · · < tN = T and n = 1, . . . , N. We assume the following space-time quasi-uniformity of the spatial and temporal discretizations: for h = max_{K∈T0∪TG}{hK} and k = max_{1≤n≤N}{kn}, assume that there exist constants C1, C2 > 0 such that

\[
h \le C_1 k_{\min}, \qquad k \le C_2 h_{\min}, \qquad (3.1)
\]

where k_min = min_{1≤n≤N}{kn} and h_min = min_{K∈T0∪TG}{hK}.

We next define the following slabwise space-time domains:

\[
S_{0,n} := \Omega_0 \times I_n, \qquad (3.2)
\]
\[
S_{i,n} := \{(x, t) \in S_{0,n} : x \in \Omega_i(t)\}, \quad i = 1, 2, \qquad (3.3)
\]
\[
\bar\Gamma_n := \{(s, t) \in S_{0,n} : s \in \Gamma(t)\}. \qquad (3.4)
\]

In general we will use a bar, i.e., ¯·, to denote something related to space-time, such as domains and variables.

In addition to the "visible" and uncovered domains Ω1(t) and Ω2(t), we also consider the covered overlap domain ΩO(t). To define it, we will use the set of simplices T0,Γ(t) := {K ∈ T0 : K ∩ Γ(t) ≠ ∅}, i.e., all simplices in T0 that are cut by Γ at time t. We define the overlap domain ΩO(t) for a time t ∈ [0, T] by

\[
\Omega_O(t) := \bigcup_{K \in T_{0,\Gamma(t)}} K \cap \Omega_2(t). \qquad (3.5)
\]
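To make T0,Γ(t) and ΩO(t) concrete, here is a minimal 1D sketch under assumed data (not code from the paper): the background mesh T0 covers Ω0 = (0, 1), the moving domain is G(t) = (a, b), so Γ(t) = {a, b}; the cells of T0 cut by Γ are collected and ΩO(t) is assembled cell by cell as in (3.5).

```python
import numpy as np

# Toy 1D illustration of the cut cells T0_Gamma(t) and the overlap domain (3.5).
def overlap_domain(nodes, a, b):
    """nodes: sorted nodes of the background mesh T0; (a, b): the domain G(t).
    Returns (cut_cells, overlap_pieces) as lists of intervals."""
    cut_cells, overlap_pieces = [], []
    for xl, xr in zip(nodes[:-1], nodes[1:]):      # background cell K = (xl, xr)
        if (xl < a < xr) or (xl < b < xr):         # K is cut by Gamma(t)
            cut_cells.append((float(xl), float(xr)))
            lo, hi = max(xl, a), min(xr, b)        # K ∩ Omega_2(t) = K ∩ G(t)
            if lo < hi:
                overlap_pieces.append((float(lo), float(hi)))
    return cut_cells, overlap_pieces

nodes = np.linspace(0.0, 1.0, 11)                  # uniform background mesh, h = 0.1
cut_cells, overlap_pieces = overlap_domain(nodes, a=0.33, b=0.61)
print(cut_cells)        # approximately [(0.3, 0.4), (0.6, 0.7)]
print(overlap_pieces)   # approximately [(0.33, 0.4), (0.6, 0.61)] -> Omega_O(t)
```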
As a discrete counterpart to the movement of the domain G, we prescribe a dG(0) movement for the overlapping mesh TG. By this we mean that the location of the overlapping mesh TG is a dG(0) function with respect to time, i.e., discontinuous on [0, T] and constant on each In. This means that on each In the position of TG is fixed, but changes from In−1 to In. We take this change to be ∫_{In} µ(t) dt, i.e., the total change in the location of G over In. An illustration of the slabwise space-time domains Si,n, defined by (3.3), with dG(0) mesh movement is shown in Figure 2. The dG(0) mesh movement also results in the following:

\[
\Omega_{i,n} = \Omega_i(t_n) = \Omega_i(t), \quad \forall t \in I_n, \qquad (3.6a)
\]
\[
\Gamma_n = \Gamma(t_n) = \Gamma(t), \quad \forall t \in I_n, \qquad (3.6b)
\]
\[
\Omega_{O,n} = \Omega_O(t_n) = \Omega_O(t), \quad \forall t \in I_n. \qquad (3.6c)
\]

[Figure 2: Space-time slabs with dG(0) mesh movement.]
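As a concrete reading of this slabwise movement, here is a minimal sketch under assumed 1D data (not code from the paper): within each slab In the overlapping mesh is held fixed, and going into In it is shifted by the displacement of G over In, i.e., by the integral of the prescribed velocity µ over In, so that the slab configuration matches the end-of-slab location, cf. (3.6).

```python
import numpy as np

# dG(0) mesh movement in 1D: one fixed copy of the overlapping-mesh nodes per slab.
def dg0_mesh_positions(TG_nodes, t_nodes, mu, quad_pts=33):
    """Return the overlapping-mesh node positions used on each slab I_1, ..., I_N."""
    positions = []
    nodes = np.asarray(TG_nodes, dtype=float)
    for a, b in zip(t_nodes[:-1], t_nodes[1:]):              # slab I_n = (a, b]
        s = np.linspace(a, b, quad_pts)
        vals = mu(s)
        shift = float(np.sum(0.5 * (vals[:-1] + vals[1:]) * np.diff(s)))  # ∫_{I_n} mu dt
        nodes = nodes + shift                                # jump at t_{n-1}, fixed on I_n
        positions.append(nodes.copy())
    return positions

t_nodes = np.linspace(0.0, 1.0, 5)                            # four slabs of length 0.25
TG_nodes = np.linspace(0.3, 0.5, 3)                           # overlapping mesh at t = 0
slabs = dg0_mesh_positions(TG_nodes, t_nodes, mu=lambda s: 0.2 * np.ones_like(s))
print(slabs[0])   # approximately [0.35 0.45 0.55]: the position used on all of I_1
```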
3.2 Finite element spaces

Let {ϕ0,j}j be the set of polynomial interior basis functions of degree p for T0 and let {ϕG,j}j be the set of polynomial basis functions of degree p for TG. Note that the basis functions of TG depend on time as well as space, since the position of TG changes globally in time.

3.2.1 The semi-discrete spaces Vh(t) and Vh(In)

For t ∈ [0, T], we define the semi-discrete finite element spaces Vh,0 and Vh,G as the spaces of continuous piecewise polynomials of degree ≤ p on T0 and TG, respectively. We also let the functions in Vh,0 be zero on ∂Ω0. For t ∈ [0, T], let

\[
V_{h,0} := \Big\{ v : v(x, t) = \sum_j V_j(t)\,\varphi_{0,j}(x), \ V_j : [0, T] \to \mathbb{R}, \ \forall j \Big\}, \qquad (3.7)
\]
\[
V_{h,G} := \Big\{ v : v(x, t) = \sum_j V_j(t)\,\varphi_{G,j}(x, t), \ V_j : [0, T] \to \mathbb{R}, \ \forall j \Big\}. \qquad (3.8)
\]

We now use these two spaces to define the broken finite element space Vh(t) as the space of functions that on Ω1(t) are restrictions of functions in Vh,0 to Ω1(t), and on Ω2(t) are restrictions of functions in Vh,G to Ω2(t). For t ∈ [0, T], let

\[
V_h(t) := \{ v : v|_{\Omega_1(t)} = v_0|_{\Omega_1(t)} \text{ for some } v_0 \in V_{h,0}, \text{ and } v|_{\Omega_2(t)} = v_G|_{\Omega_2(t)} \text{ for some } v_G \in V_{h,G} \}. \qquad (3.9)
\]

See Figure 3 for an illustration of a function v ∈ Vh(t).
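In the same spirit as Figure 3, here is a toy evaluation of a function v ∈ Vh(t) of (3.9) for d = 1 and p = 1 (assumed data, not code from the paper): on Ω1(t) we use a piecewise-linear function on the background mesh T0, and on Ω2(t) = G one on the overlapping mesh TG. Nothing forces the two pieces to match across Γ(t); that mismatch is treated weakly by the interface terms of the formulation in Section 3.3.

```python
import numpy as np

def p1_eval(nodes, nodal_values, x):
    """Evaluate the continuous piecewise-linear function with the given nodal
    values on the 1D mesh `nodes` at a point x (clamped to the mesh)."""
    x = float(np.clip(x, nodes[0], nodes[-1]))
    i = int(np.searchsorted(nodes, x, side="right")) - 1
    i = min(max(i, 0), len(nodes) - 2)
    lam = (x - nodes[i]) / (nodes[i + 1] - nodes[i])
    return (1.0 - lam) * nodal_values[i] + lam * nodal_values[i + 1]

def broken_eval(x, T0_nodes, v0, TG_nodes, vG, a, b):
    """v(x) for v in Vh(t): the TG-piece on Omega_2 = (a, b), the T0-piece elsewhere."""
    if a < x < b:
        return p1_eval(TG_nodes, vG, x)
    return p1_eval(T0_nodes, v0, x)

T0_nodes = np.linspace(0.0, 1.0, 6)            # background mesh on Omega_0 = (0, 1)
TG_nodes = np.linspace(0.35, 0.65, 4)          # overlapping mesh on G = (0.35, 0.65)
v0 = np.sin(np.pi * T0_nodes)                  # some nodal values, zero on the boundary
vG = np.full(len(TG_nodes), 1.0)
for x in (0.2, 0.5, 0.9):
    print(x, broken_eval(x, T0_nodes, v0, TG_nodes, vG, a=0.35, b=0.65))
```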
From the dG(0) movement of TG, we have via (3.6a) that Vh(t) is the same space for all t ∈ In. We thus write

\[
V_{h,n} = V_h(t_n) = V_h(t), \quad \forall t \in I_n. \qquad (3.10)
\]

Now we define the space Vh(In) as the space of functions that lie in Vh,n for all t ∈ In. For n = 1, . . . , N, let

\[
V_h(I_n) := \{ v : v(\cdot, t) \in V_{h,n}, \ \forall t \in I_n \}. \qquad (3.11)
\]

With a general and somewhat relaxed notation, any v ∈ Vh(In) can be represented as

\[
v(x, t) = \sum_j V_j(t)\,\varphi_j(x), \qquad (3.12)
\]

where the ϕj's belong to both {ϕ0,j}j and {ϕG,j}j, and the only restriction on the coefficients Vj is that Vj(t) ∈ R for all t ∈ (0, T].

[Figure 3: An example of v(x, t) versus x for d = 1, where v(·, t) ∈ Vh(t), p = 1, and time t ∈ (0, T]. The nodes of the blue background mesh T0 are marked with circles and the nodes of the red moving overlapping mesh TG with crosses.]
3.2.2 The fully discrete spaces V^n_h and Vh

Now we consider a subspace of Vh(In), which consists of functions whose coefficients have a polynomial time dependence of degree q or lower. Analogously with the procedure of defining Vh(t), we first define two auxiliary finite element spaces. For n = 1, . . . , N, let V^n_{h,0} and V^n_{h,G} be the spaces of continuous piecewise polynomials of degree ≤ p on T0 and TG for all t ∈ In, respectively, and polynomials of degree ≤ q in time along the trajectories of T0 and TG for t ∈ In, respectively. We also let the functions in V^n_{h,0} be zero on ∂Ω0 for all t ∈ In. For n = 1, . . . , N, let

\[
V^n_{h,0} := \Big\{ v : v(x, t) = \sum_j V_j(t)\,\varphi_{0,j}(x), \ V_j \in P_q(I_n), \ \forall j \Big\}, \qquad (3.13)
\]
\[
V^n_{h,G} := \Big\{ v : v(x, t) = \sum_j V_j(t)\,\varphi_{G,j}(x), \ V_j \in P_q(I_n), \ \forall j \Big\}, \qquad (3.14)
\]

where Pq(In) is the space of polynomials of degree ≤ q on In; see Figure 4. We now use these two spaces to define the broken finite element space V^n_h as the space of functions that on S1,n are restrictions of functions in V^n_{h,0} to S1,n, and on S2,n are restrictions of functions in V^n_{h,G} to S2,n. For n = 1, . . . , N, let

\[
V^n_h := \{ v : v|_{S_{1,n}} = v^n_0|_{S_{1,n}} \text{ for some } v^n_0 \in V^n_{h,0}, \text{ and } v|_{S_{2,n}} = v^n_G|_{S_{2,n}} \text{ for some } v^n_G \in V^n_{h,G} \}. \qquad (3.15)
\]

Finally, we define the finite element space Vh as the space of functions that lie in V^n_h for n = 1, . . . , N:

\[
V_h := \{ v : v|_{S_{0,n}} \in V^n_h, \ n = 1, \ldots, N \}. \qquad (3.16)
\]
[Figure 4: Examples of Vj(t) versus t on three subsequent time subintervals In = (tn−1, tn], for Vj ∈ Pq(In). Left: q = 0, i.e., Vj is constant on each In. Right: q = 1, i.e., Vj is at most linear on each In.]

3.3 Finite element formulation

We may now formulate the space-time cut finite element formulation for the problem described in Section 2 as follows: Find uh ∈ Vh such that

\[
\begin{aligned}
&\sum_{n=1}^{N}\Big(\int_{I_n}(\dot u_h, v)_{\Omega_0}\,dt + ([u_h]_{n-1}, v^+_{n-1})_{\Omega_0}\Big)
+ \sum_{i=1}^{2}\sum_{n=1}^{N}\int_{I_n}(\nabla u_h, \nabla v)_{\Omega_{i,n}}\,dt \\
&\quad + \sum_{n=1}^{N}\int_{\bar\Gamma_n}\big(-\langle\partial_n u_h\rangle[v] - \langle\partial_n v\rangle[u_h] + \gamma h_K^{-1}[u_h][v]\big)\,d\bar s
+ \sum_{n=1}^{N}\int_{I_n}([\nabla u_h], [\nabla v])_{\Omega_{O,n}}\,dt
= \int_0^T (f, v)_{\Omega_0}\,dt
\end{aligned}
\qquad (3.17)
\]

for all v ∈ Vh, where Vh is defined by (3.16), In = (tn−1, tn], (·, ·)Ω is the L2(Ω)-inner product, [v]n is the jump in v at time tn, i.e., [v]n = v^+_n − v^−_n with v^±_n = lim_{ε→0+} v(x, tn ± ε), Ωi,n = Ωi(tn), Γ̄n = Γn × In, ⟨v⟩ is a convex weighted average of v on Γ, i.e., ⟨v⟩ = ω1 v1 + ω2 v2, where ω1, ω2 ∈ [0, 1] and ω1 + ω2 = 1, vi = lim_{ε→0+} v(s̄ − ε ni), s̄ = (s, t), n is the normal vector to Γ (it should not be confused with the time index n, e.g., in tn), ∂n v = n · ∇v, [v] is the jump in v over Γ, i.e., [v] = v1 − v2, γ ≥ 0 is a stabilization parameter, hK = hK(x) = hK0 for x ∈ K0, where hK0 is the diameter of the simplex K0 ∈ T0, and ΩO,n is defined by (3.6c).
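A remark for orientation (this specialization is ours, read off from (3.17); it is not stated in this form at this point in the text): for the lowest-order choice q = 0 the coefficients Vj are constant on each slab, so the term ∫_{In}( u̇h, v)_{Ω0} dt vanishes and (3.17) decouples into one problem per slab. Writing u_h^n ∈ Vh,n for the value of uh on In, each slab requires

\[
(u_h^{n} - u_h^{n-1}, v)_{\Omega_0} + k_n\,A_n(u_h^{n}, v) = \int_{I_n} (f, v)_{\Omega_0}\,dt
\qquad \text{for all } v \in V_{h,n},
\]

which is a backward-Euler-type step. Here A_n denotes the slabwise spatial form collecting the gradient, interface and overlap terms of (3.17); it is introduced as such in Section 4. For n = 1, the quantity u_h^0 is the datum entering through the jump [uh]_0 at t_0 (typically the initial value u0 or a projection of it).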
4 Analytic preliminaries

4.1 The bilinear form Ah,t

For t ∈ [0, T] and k ∈ N we define the broken Sobolev spaces

\[
\begin{aligned}
H^k(\Omega_1(t), \Omega_2(t)) &:= H^k(\cup_i \Omega_i(t)) := \{ v \in L^2(\Omega_0) : v|_{\Omega_i(t)} \in H^k(\Omega_i(t)), \ i = 1, 2 \}, \\
H^k_0(\Omega_1(t), \Omega_2(t)) &:= H^k_0(\cup_i \Omega_i(t)) := \{ v \in H^k(\Omega_1(t), \Omega_2(t)) : v|_{\partial\Omega_0} = 0 \},
\end{aligned}
\qquad (4.1)
\]

where H^k denotes the Sobolev space W^{k,2}. We define the symmetric bilinear form Ah,t on H1(∪iΩi(t)) by

\[
\begin{aligned}
A_{h,t}(w, v) :={}& \sum_{i=1}^{2}(\nabla w, \nabla v)_{\Omega_i(t)}
- (\langle\partial_n w\rangle, [v])_{\Gamma(t)}
- (\langle\partial_n v\rangle, [w])_{\Gamma(t)} \\
&+ (\gamma h_K^{-1}[w], [v])_{\Gamma(t)}
+ ([\nabla w], [\nabla v])_{\Omega_O(t)},
\end{aligned}
\qquad (4.2)
\]

where (w, v)Γ(t) is the L2(Γ(t))-inner product. From the dG(0) movement of TG, we have via (3.6) that Ah,t is the same bilinear form for all t ∈ In. We thus write

\[
A_n = A_{h,t_n} = A_{h,t}, \quad \forall t \in I_n. \qquad (4.3)
\]

Note that we have

\[
\int_{\bar\Gamma_n} w v \, d\bar s = \int_{I_n} (w, v)_{\Gamma(t)} \, dt. \qquad (4.4)
\]
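Before continuing, a toy reading of the interface part of (4.2) in one spatial dimension, where Γ(t) is a finite set of points and the Γ-integrals reduce to point evaluations (an illustrative sketch with assumed variable names, not code from the paper):

```python
# Contribution of a single 1D interface point to A_{h,t}(w, v) in (4.2).
# Inputs are one-sided traces and normal derivatives: (w1, dnw1) from the
# Omega_1 side, (w2, dnw2) from the Omega_2 side, and likewise for v.
def interface_terms(w1, dnw1, w2, dnw2, v1, dnv1, v2, dnv2,
                    hK, gamma, omega1=0.5, omega2=0.5):
    jump_w, jump_v = w1 - w2, v1 - v2                 # [w], [v]
    avg_dnw = omega1 * dnw1 + omega2 * dnw2           # <d_n w>
    avg_dnv = omega1 * dnv1 + omega2 * dnv2           # <d_n v>
    return (-avg_dnw * jump_v                         # -(<d_n w>, [v])_Gamma
            - avg_dnv * jump_w                        # -(<d_n v>, [w])_Gamma
            + (gamma / hK) * jump_w * jump_v)         # (gamma h_K^{-1} [w], [v])_Gamma

# With matching traces (no jumps) the interface contribution vanishes, as expected:
print(interface_terms(1.0, 2.0, 1.0, 2.0, 3.0, -1.0, 3.0, -1.0, hK=0.1, gamma=10.0))
```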
Using (4.4) and the bilinear form Ah,t, we may write the finite element variational formulation (3.17) as: Find uh ∈ Vh such that

\[
\sum_{n=1}^{N}\Big(\int_{I_n}(\dot u_h, v)_{\Omega_0}\,dt + ([u_h]_{n-1}, v^+_{n-1})_{\Omega_0}\Big)
+ \sum_{n=1}^{N}\int_{I_n} A_n(u_h, v)\,dt = \int_0^T (f, v)_{\Omega_0}\,dt, \qquad (4.5)
\]

for all v ∈ Vh.

Recall that T0,Γ(t) is the set of all simplices in T0 that are cut by Γ(t), and let ΓK(t) := K ∩ Γ(t). We define the following two mesh-dependent norms:

\[
\|w\|^2_{1/2,h,\Gamma(t)} := \sum_{K \in T_{0,\Gamma(t)}} h_K^{-1}\|w\|^2_{\Gamma_K(t)}, \qquad (4.6)
\]
\[
\|w\|^2_{-1/2,h,\Gamma(t)} := \sum_{K \in T_{0,\Gamma(t)}} h_K\|w\|^2_{\Gamma_K(t)}. \qquad (4.7)
\]

Note that

\[
\|w\|^2_{\Gamma(t)} = \sum_{K \in T_{0,\Gamma(t)}} h_K h_K^{-1}\|w\|^2_{\Gamma_K(t)}
\le h \sum_{K \in T_{0,\Gamma(t)}} h_K^{-1}\|w\|^2_{\Gamma_K(t)} = h\,\|w\|^2_{1/2,h,\Gamma(t)}, \qquad (4.8)
\]

and

\[
\begin{aligned}
(w, v)_{\Gamma(t)} &= \int_{\Gamma(t)} (h_K^{1/2} w)(h_K^{-1/2} v)\,ds
\le \Big(\int_{\Gamma(t)} h_K w^2\,ds\Big)^{1/2}\Big(\int_{\Gamma(t)} h_K^{-1} v^2\,ds\Big)^{1/2} \\
&= \Big(\sum_{K \in T_{0,\Gamma(t)}} h_K\int_{\Gamma_K(t)} w^2\,ds\Big)^{1/2}\Big(\sum_{K \in T_{0,\Gamma(t)}} h_K^{-1}\int_{\Gamma_K(t)} v^2\,ds\Big)^{1/2}
= \|w\|_{-1/2,h,\Gamma(t)}\,\|v\|_{1/2,h,\Gamma(t)},
\end{aligned}
\qquad (4.9)
\]

where h = max_{Kl∈T0∪TG}(hKl). With the two mesh-dependent norms, we define the time-dependent spatial energy norm |||·|||_{Ah,t} on H1_0(Ω1(t), Ω2(t)) by

\[
|||w|||^2_{A_{h,t}} := \sum_{i=1}^{2}\|\nabla w\|^2_{\Omega_i(t)}
+ \|\langle\partial_n w\rangle\|^2_{-1/2,h,\Gamma(t)}
+ \|[w]\|^2_{1/2,h,\Gamma(t)}
+ \|[\nabla w]\|^2_{\Omega_O(t)}. \qquad (4.10)
\]

Note that boundedness of Ah,t on (H1_0(Ω1(t), Ω2(t)), |||·|||_{Ah,t}) follows trivially from using (4.9) and (4.8) in (4.2).
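Spelled out for readability (this chain is our expansion of that remark, estimating each term of (4.2) with the Cauchy–Schwarz inequality and the interface terms with (4.9); formally, for functions for which the right-hand side is finite):

\[
\begin{aligned}
A_{h,t}(w, v) \le{}& \sum_{i=1}^{2}\|\nabla w\|_{\Omega_i(t)}\|\nabla v\|_{\Omega_i(t)}
+ \|\langle\partial_n w\rangle\|_{-1/2,h,\Gamma(t)}\|[v]\|_{1/2,h,\Gamma(t)}
+ \|\langle\partial_n v\rangle\|_{-1/2,h,\Gamma(t)}\|[w]\|_{1/2,h,\Gamma(t)} \\
&+ \gamma\,\|[w]\|_{1/2,h,\Gamma(t)}\|[v]\|_{1/2,h,\Gamma(t)}
+ \|[\nabla w]\|_{\Omega_O(t)}\|[\nabla v]\|_{\Omega_O(t)}
\le (4 + \gamma)\,|||w|||_{A_{h,t}}\,|||v|||_{A_{h,t}}.
\end{aligned}
\]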
We are now ready to prove coercivity of Ah,t on Vh(t) with respect to |||·|||_{Ah,t}.

Lemma 4.1 (Discrete coercivity of Ah,t). Let the bilinear form Ah,t and the energy norm |||·|||_{Ah,t} be defined by (4.2) and (4.10), respectively. Then, for t ∈ [0, T] and γ sufficiently large, there exists a constant αt > 0 such that

\[
A_{h,t}(v, v) \ge \alpha_t\,|||v|||^2_{A_{h,t}}, \quad \forall v \in V_h(t). \qquad (4.11)
\]

Proof. Following the proof of the coercivity in [2], we start by inserting v ∈ Vh(t) into Ah,t:

\[
\begin{aligned}
A_{h,t}(v, v) &= \sum_{i=1}^{2}(\nabla v, \nabla v)_{\Omega_i(t)}
- (\langle\partial_n v\rangle, [v])_{\Gamma(t)} - (\langle\partial_n v\rangle, [v])_{\Gamma(t)}
+ (\gamma h_K^{-1}[v], [v])_{\Gamma(t)} + ([\nabla v], [\nabla v])_{\Omega_O(t)} \\
&= \sum_{i=1}^{2}\|\nabla v\|^2_{\Omega_i(t)}
- 2(\langle\partial_n v\rangle, [v])_{\Gamma(t)}
+ \gamma\|[v]\|^2_{1/2,h,\Gamma(t)}
+ \|[\nabla v]\|^2_{\Omega_O(t)}.
\end{aligned}
\qquad (4.12)
\]

The second term in the last row of (4.12) with opposite sign is

\[
\begin{aligned}
2(\langle\partial_n v\rangle, [v])_{\Gamma(t)}
&\le 2\|\langle\partial_n v\rangle\|_{-1/2,h,\Gamma(t)}\|[v]\|_{1/2,h,\Gamma(t)}
\le \frac{1}{\varepsilon}\|\langle\partial_n v\rangle\|^2_{-1/2,h,\Gamma(t)} + \varepsilon\|[v]\|^2_{1/2,h,\Gamma(t)} \\
&= \frac{2}{\varepsilon}\|\langle\partial_n v\rangle\|^2_{-1/2,h,\Gamma(t)}
- \frac{1}{\varepsilon}\|\langle\partial_n v\rangle\|^2_{-1/2,h,\Gamma(t)}
+ \varepsilon\|[v]\|^2_{1/2,h,\Gamma(t)} \\
&\le \frac{2}{\varepsilon}C_I\Big(\sum_{i=1}^{2}\|\nabla v\|^2_{\Omega_i(t)} + \|[\nabla v]\|^2_{\Omega_O(t)}\Big)
- \frac{1}{\varepsilon}\|\langle\partial_n v\rangle\|^2_{-1/2,h,\Gamma(t)}
+ \varepsilon\|[v]\|^2_{1/2,h,\Gamma(t)},
\end{aligned}
\qquad (4.13)
\]

where ε > 0 is to be chosen and CI > 0. We have used (4.9) to obtain the first inequality. To obtain the last inequality, we have used the inverse inequality from Lemma A.4. Inserting (4.13) into (4.12) gives
Inserting (4.13) into (4.12) gives
\[
A_{h,t}(v, v) \ge \sum_{i=1}^{2} \|\nabla v\|^2_{\Omega_i(t)} - \frac{2 C_I}{\varepsilon} \Big( \sum_{i=1}^{2} \|\nabla v\|^2_{\Omega_i(t)} + \|[\nabla v]\|^2_{\Omega_O(t)} \Big) + \frac{1}{\varepsilon} \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma(t)} - \varepsilon \|[v]\|^2_{1/2,h,\Gamma(t)} + \gamma \|[v]\|^2_{1/2,h,\Gamma(t)} + \|[\nabla v]\|^2_{\Omega_O(t)}
\]
\[
= \Big( 1 - \frac{2 C_I}{\varepsilon} \Big) \sum_{i=1}^{2} \|\nabla v\|^2_{\Omega_i(t)} + \frac{1}{\varepsilon} \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma(t)} + (\gamma - \varepsilon) \|[v]\|^2_{1/2,h,\Gamma(t)} + \Big( 1 - \frac{2 C_I}{\varepsilon} \Big) \|[\nabla v]\|^2_{\Omega_O(t)}. \qquad (4.14)
\]
By taking $\varepsilon > 2 C_I$, e.g., $\varepsilon = 4 C_I$, and $\gamma > \varepsilon$, we may obtain (4.11) from (4.14).
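For concreteness (an illustration of the last step, not an additional assumption), the choice $\varepsilon = 4 C_I$ together with $\gamma > 4 C_I$ turns (4.14) into
\[
A_{h,t}(v, v) \ge \tfrac{1}{2} \sum_{i=1}^{2} \|\nabla v\|^2_{\Omega_i(t)} + \tfrac{1}{4 C_I} \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma(t)} + (\gamma - 4 C_I) \|[v]\|^2_{1/2,h,\Gamma(t)} + \tfrac{1}{2} \|[\nabla v]\|^2_{\Omega_O(t)},
\]
so that (4.11) holds with, for instance, $\alpha_t = \min\{\tfrac{1}{2}, \tfrac{1}{4 C_I}, \gamma - 4 C_I\}$.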
4.1.1 Standard operators that map to $V_h(t)$

Here we define some standard spatial operators for every $t \in [0, T]$. The $L^2(\Omega_0)$-projection operator $P_{h,t} : L^2(\Omega_0) \to V_h(t)$ is defined by
\[
(P_{h,t} w, v)_{\Omega_0} = (w, v)_{\Omega_0}, \qquad \forall v \in V_h(t). \qquad (4.15)
\]
The Ritz projection operator $R_{h,t} : H^1(\cup_i \Omega_i(t)) \to V_h(t)$ is defined by
\[
A_{h,t}(R_{h,t} w, v) = A_{h,t}(w, v), \qquad \forall v \in V_h(t). \qquad (4.16)
\]

Lemma 4.2 (Estimates for the Ritz projection error). Let the spatial energy norm $|||\cdot|||_{A_{h,t}}$ and the Ritz projection operator $R_{h,t}$ be defined by (4.10) and (4.16), respectively. Then there exist constants $C_1, C_2 > 0$ such that for any $w \in H^{p+1}(\Omega_0) \cap H^1_0(\Omega_0)$ we have that
\[
|||w - R_{h,t} w|||_{A_{h,t}} \le C_1 h^{p} \|D_x^{p+1} w\|_{\Omega_0}, \qquad (4.17)
\]
\[
\|w - R_{h,t} w\|_{\Omega_0} \le C_2 h^{p+1} \|D_x^{p+1} w\|_{\Omega_0}. \qquad (4.18)
\]

Proof. The proof is basically the same as in the standard case but with natural modifications to account for the CutFEM setting. First we will show the energy estimate (4.17), then we will use it together with the Aubin–Nitsche duality trick to show (4.18). Let $\delta = w - R_{h,t} w$ denote the projection error. We start by splitting the error using the interpolant $I_{h,t} w \in V_h(t)$, where $I_{h,t}$ is the spatial interpolation operator defined by (B.1),
\[
\delta = w - R_{h,t} w \underbrace{\pm I_{h,t} w}_{=0} = \underbrace{w - I_{h,t} w}_{=\pi} + \underbrace{I_{h,t} w - R_{h,t} w}_{=\eta} = \pi + \eta. \qquad (4.19)
\]
We then consider
\[
|||\delta|||_{A_{h,t}} = |||\pi + \eta|||_{A_{h,t}} \le |||\pi|||_{A_{h,t}} + |||\eta|||_{A_{h,t}}, \qquad (4.20)
\]
where we focus on the $\eta$-part first. We note that $\eta \in V_h(t)$ and use Lemma 4.1, i.e., the discrete coercivity of $A_{h,t}$, to get
\[
|||\eta|||^2_{A_{h,t}} \le \frac{1}{\alpha_t} A_{h,t}(\eta, \eta) = C A_{h,t}(I_{h,t} w - R_{h,t} w \pm w, \eta) = C A_{h,t}(\underbrace{I_{h,t} w - w}_{=-\pi}, \eta) + C \underbrace{A_{h,t}(w - R_{h,t} w, \eta)}_{=0 \text{ from } (4.16)}
\]
\[
= -C A_{h,t}(\pi, \eta) \le C |||\pi|||_{A_{h,t}} |||\eta|||_{A_{h,t}}, \qquad \Longrightarrow \qquad |||\eta|||_{A_{h,t}} \le C |||\pi|||_{A_{h,t}}. \qquad (4.21)
\]
Using (4.21) in (4.20), we get
\[
|||w - R_{h,t} w|||_{A_{h,t}} = |||\delta|||_{A_{h,t}} \le C |||\pi|||_{A_{h,t}} = C |||w - I_{h,t} w|||_{A_{h,t}} \overset{(B.2)}{\le} C h^{p} \|D_x^{p+1} w\|_{\Omega_0}, \qquad (4.22)
\]
which is (4.17). We consider the auxiliary problem: Find $\varphi \in H^2(\Omega_0) \cap H^1_0(\Omega_0)$ such that
\[
-\Delta \varphi = \delta \quad \text{in } \Omega_0. \qquad (4.23)
\]
We note that $\nabla \varphi \in H^1(\Omega_0)$ from $\varphi \in H^2(\Omega_0)$, which means that $\nabla \varphi|_{\Gamma(t)} \in L^2(\Gamma(t))$. Thus $[\partial_n \varphi]|_{\Gamma(t)} = 0$ in $L^2(\Gamma(t))$. We denote by $I_{h,p=1,t}$ the spatial interpolation operator $I_{h,t}$ for $p = 1$, and note that $I_{h,p=1,t} \varphi \in V_h(t)$. The square of the left-hand side of (4.18) is
\[
\|w - R_{h,t} w\|^2_{\Omega_0} = (\delta, \delta)_{\Omega_0} \overset{(4.23)}{=} (-\Delta \varphi, \delta)_{\Omega_0} \overset{(A.12)}{=} A_{h,t}(\varphi, \delta) = A_{h,t}(\varphi, w - R_{h,t} w) \overset{\text{5th}}{=} A_{h,t}(\varphi - I_{h,p=1,t} \varphi, w - R_{h,t} w)
\]
\[
\le C |||\varphi - I_{h,p=1,t} \varphi|||_{A_{h,t}} |||w - R_{h,t} w|||_{A_{h,t}} \overset{(B.2),(4.17)}{\le} C \big( C h \|D_x^{2} \varphi\|_{\Omega_0} \big) \big( C h^{p} \|D_x^{p+1} w\|_{\Omega_0} \big)
\]
\[
\overset{\text{8th}}{\le} C h^{p+1} \|\Delta \varphi\|_{\Omega_0} \|D_x^{p+1} w\|_{\Omega_0} = C h^{p+1} \|w - R_{h,t} w\|_{\Omega_0} \|D_x^{p+1} w\|_{\Omega_0}, \qquad (4.24)
\]
where, in the fifth step, we have used that $w - R_{h,t} w$ is $A_{h,t}$-orthogonal to $V_h(t)$, which follows from the definition of $R_{h,t}$. In the eighth step, we have used elliptic regularity on $H^2(\Omega_0) \cap H^1_0(\Omega_0)$ for $\varphi$. Dividing both sides of (4.24) by a factor $\|w - R_{h,t} w\|_{\Omega_0}$ gives (4.18).

Note that Lemma 4.2 provides estimates for the approximation error for the elliptic problems corresponding to (2.5) and (3.17). This is so since the discrete elliptic solution is exactly the Ritz projection of the continuous one.

The discrete Laplacian $\Delta_{h,t} : H^1(\cup_i \Omega_i(t)) \to V_h(t)$ is defined by
\[
(-\Delta_{h,t} w, v)_{\Omega_0} = A_{h,t}(w, v), \qquad \forall v \in V_h(t). \qquad (4.25)
\]
From the dG(0) movement of $\mathcal{T}_G$, we have via (3.10) and (4.3) that
\[
P_n = P_{h,t_n} = P_{h,t}, \qquad \forall t \in I_n, \qquad (4.26)
\]
\[
R_n = R_{h,t_n} = R_{h,t}, \qquad \forall t \in I_n, \qquad (4.27)
\]
\[
\Delta_n = \Delta_{h,t_n} = \Delta_{h,t}, \qquad \forall t \in I_n. \qquad (4.28)
\]
4.1.2 Shift operator

Here we introduce a shift operator that is not present in the standard Eriksson–Johnson analysis presented in [12, 13], and that is needed because of the dG(0) mesh movement in the CutFEM setting. The shift operator will be used in the proof of Lemma 5.2, i.e., the strong stability estimate. At one point in that proof, one would like to consider $R_n u^-_{h,n-1}$. This is, however, undefined in the current setting because of the shifting discontinuity coming from the movement of $\Gamma$. Since $R_n$ is only defined for functions in $H^1(\cup_i \Omega_{i,n})$ and $u^-_{h,n-1} \in V_{h,n-1} \subset H^1(\cup_i \Omega_{i,n-1})$, we cannot talk about $R_n u^-_{h,n-1}$. This is where the shift operator comes in: the idea is to consider a Ritz-like projection from one discrete space to another.

To define the shift operator, we will use a special bilinear form $A_{n-1,n}$. To define $A_{n-1,n}$, we will use a partition of $\Omega_0$ into the subdomains
\[
\omega_{ij} = \omega_{i,j,n-1} := \Omega_{i,n-1} \cap \Omega_{j,n}, \qquad \text{for } i, j = 1, 2. \qquad (4.29)
\]
For $n = 1, \dots, N$, we define the non-symmetric bilinear form $A_{n-1,n}$ on $H^1(\cup_{ij} \omega_{ij})$ by
\[
A_{n-1,n}(v, w) := \sum_{i,j=1}^{2} (\nabla v, \nabla w)_{\omega_{ij}} - (\langle \partial_n v \rangle, [w])_{\Gamma_n} - ([v], \langle \partial_n w \rangle)_{\Gamma_{n-1}}. \qquad (4.30)
\]
Using the energy norm $|||\cdot|||_{A_{h,t}}$ defined by (4.10) together with (4.3), we define two related energy norms, one on $H^1_0(\cup_i \Omega_{i,n-1})$ and the other on $H^1_0(\cup_i \Omega_{i,n})$ (written here with calligraphic subscripts to distinguish them from $|||\cdot|||_{A_{n-1}}$ and $|||\cdot|||_{A_n}$), by
\[
|||v|||^2_{\mathcal{A}_{n-1}} := |||v|||^2_{A_{n-1}} + \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_n}, \qquad (4.31)
\]
\[
|||w|||^2_{\mathcal{A}_{n}} := |||w|||^2_{A_{n}} + \|\langle \partial_n w \rangle\|^2_{-1/2,h,\Gamma_{n-1}}. \qquad (4.32)
\]
With these two norms, we may obtain a continuity result for $A_{n-1,n}$, which we present as the following lemma:

Lemma 4.3 (Continuity of $A_{n-1,n}$). Let the bilinear form $A_{n-1,n}$ be defined by (4.30), and the two norms $|||\cdot|||_{\mathcal{A}_{n-1}}$ and $|||\cdot|||_{\mathcal{A}_n}$ by (4.31) and (4.32), respectively. Then there exists a constant $C > 0$ such that
\[
A_{n-1,n}(v, w) \le C\, |||v|||_{\mathcal{A}_{n-1}}\, |||w|||_{\mathcal{A}_n}, \qquad \forall v \in H^1(\cup_i \Omega_{i,n-1}) \text{ and } \forall w \in H^1(\cup_i \Omega_{i,n}). \qquad (4.33)
\]

Proof. The proof is straightforward. The left-hand side of (4.33) is
\[
A_{n-1,n}(v, w) = \underbrace{\sum_{i,j=1}^{2} (\nabla v, \nabla w)_{\omega_{ij}}}_{=I} - \underbrace{(\langle \partial_n v \rangle, [w])_{\Gamma_n}}_{=II} - \underbrace{([v], \langle \partial_n w \rangle)_{\Gamma_{n-1}}}_{=III}. \qquad (4.34)
\]
We treat the terms separately, starting with the first:
\[
I = \sum_{i,j=1}^{2} (\nabla v, \nabla w)_{\omega_{ij}} \le \Big( \sum_{i,j=1}^{2} \|\nabla v\|^2_{\omega_{ij}} \Big)^{1/2} \Big( \sum_{i,j=1}^{2} \|\nabla w\|^2_{\omega_{ij}} \Big)^{1/2}
= \Big( \sum_{i=1}^{2} \|\nabla v\|^2_{\Omega_{i,n-1}} \Big)^{1/2} \Big( \sum_{i=1}^{2} \|\nabla w\|^2_{\Omega_{i,n}} \Big)^{1/2}
\]
\[
\le |||v|||_{A_{n-1}}\, |||w|||_{A_n} \le |||v|||_{\mathcal{A}_{n-1}}\, |||w|||_{\mathcal{A}_n}, \qquad (4.35)
\]
where we have used that $v \in H^1(\cup_i \Omega_{i,n-1})$ and $w \in H^1(\cup_i \Omega_{i,n})$ to merge integrals over the $\omega_{ij}$'s into integrals over the $\Omega_i$'s, e.g., $\|\nabla v\|^2_{\omega_{i1}} + \|\nabla v\|^2_{\omega_{i2}} = \|\nabla v\|^2_{\Omega_{i,n-1}}$. The second term is
\[
II = (\langle \partial_n v \rangle, [w])_{\Gamma_n} \overset{(4.9)}{\le} \|\langle \partial_n v \rangle\|_{-1/2,h,\Gamma_n} \|[w]\|_{1/2,h,\Gamma_n} \le |||v|||_{\mathcal{A}_{n-1}}\, |||w|||_{\mathcal{A}_n}. \qquad (4.36)
\]
The third term is treated in the same way, thus
\[
III = ([v], \langle \partial_n w \rangle)_{\Gamma_{n-1}} \overset{(4.9)}{\le} \|[v]\|_{1/2,h,\Gamma_{n-1}} \|\langle \partial_n w \rangle\|_{-1/2,h,\Gamma_{n-1}} \le |||v|||_{\mathcal{A}_{n-1}}\, |||w|||_{\mathcal{A}_n}. \qquad (4.37)
\]
Collecting the estimates for the three terms gives the continuity result (4.33).

By restricting $v$ and $w$ in Lemma 4.3 to the corresponding discrete subspaces, i.e., $V_{h,n-1}$ and $V_{h,n}$, respectively, we may obtain a continuity result in the weaker $A_n$-norms. This is done by estimating the average terms in the $\mathcal{A}$-norms using an inverse inequality that is a variant of the one from Lemma A.4.
The treatment is analogous for both average terms in the $\mathcal{A}$-norms, so we only consider the one in (4.31). This term is
\[
\|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_n} = \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_n \cap \Gamma_{n-1}} + \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_n \setminus \Gamma_{n-1}}
\le \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_{n-1}} + \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_n \setminus \Gamma_{n-1}}
\]
\[
\le C\, |||v|||^2_{A_{n-1}} + \|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_n \setminus \Gamma_{n-1}}, \qquad (4.38)
\]
where we also want to estimate the second term by $|||v|||^2_{A_{n-1}}$. We do this by following the proof of Lemma A.4, omitting some of the steps that are the same. Partitioning $\Gamma_n \setminus \Gamma_{n-1}$ into $\grave{\Gamma}_i := (\Gamma_n \setminus \Gamma_{n-1}) \cap \Omega_{i,n-1}$, using the interdependent indices $i$ and $j$, and writing $\grave{\Gamma}_{i K_j} = K_j \cap \grave{\Gamma}_i$, we have for $v \in V_{h,n-1}$ that
\[
\|\langle \partial_n v \rangle\|^2_{-1/2,h,\Gamma_n \setminus \Gamma_{n-1}} = \sum_{i=1}^{2} \|\langle \partial_n v \rangle\|^2_{-1/2,h,\grave{\Gamma}_i} = \sum_{i=1}^{2} \sum_{K_0 \in \mathcal{T}_{0,\grave{\Gamma}_i}} h_{K_0} \|\langle \partial_n v \rangle\|^2_{\grave{\Gamma}_{i K_0}}
\overset{(A.22)}{\le} C \sum_{i=1}^{2} \sum_{K_j \in \mathcal{T}_{j,\grave{\Gamma}_i}} h_{K_j} \|\langle \partial_n v \rangle\|^2_{\grave{\Gamma}_{i K_j}}
\]
\[
= C \sum_{\grave{\Gamma}_{i K_j}} h_{K_j} \|\langle \partial_n v \rangle\|^2_{\grave{\Gamma}_{i K_j}}
\le C \sum_{\grave{\Gamma}_{i K_j}} h_{K_j} \big( \|(\nabla v)^+\|^2_{\grave{\Gamma}_{i K_j}} + \|(\nabla v)^-\|^2_{\grave{\Gamma}_{i K_j}} \big)
\overset{(A.20)}{\le} C \sum_{\grave{\Gamma}_{i K_j}} h_{K_j} \big( C h^{-1}_{K^+_j} \|\nabla v\|^2_{K^+_j} + C h^{-1}_{K^-_j} \|\nabla v\|^2_{K^-_j} \big)
\]
\[
\le C \sum_{\grave{\Gamma}_{i K_j}} \big( \|\nabla v\|^2_{K^+_j} + \|\nabla v\|^2_{K^-_j} \big)
\le C \big( \|\nabla v\|^2_{\Omega_{1,n-1}} + \|(\nabla v)_1\|^2_{\Omega_{O,n-1}} + \|\nabla v\|^2_{\Omega_{2,n-1}} \big)
\overset{(A.25)}{\le} C \Big( \sum_{i=1}^{2} \|\nabla v\|^2_{\Omega_{i,n-1}} + \|[\nabla v]\|^2_{\Omega_{O,n-1}} \Big) \le C\, |||v|||^2_{A_{n-1}}. \qquad (4.39)
\]
By plugging (4.39) into (4.38), we get for $v \in V_{h,n-1}$ that
\[
\|\langle \partial_n v \rangle\|_{-1/2,h,\Gamma_n} \le C\, |||v|||_{A_{n-1}}, \qquad (4.40)
\]
and the analogous result for $w \in V_{h,n}$,
\[
\|\langle \partial_n w \rangle\|_{-1/2,h,\Gamma_{n-1}} \le C\, |||w|||_{A_n}. \qquad (4.41)
\]
Using (4.40) and (4.41) in (4.31) and (4.32), respectively, we may obtain the discrete norm equivalences
\[
|||v|||_{A_{n-1}} \le |||v|||_{\mathcal{A}_{n-1}} \le C\, |||v|||_{A_{n-1}}, \qquad \forall v \in V_{h,n-1}, \qquad (4.42)
\]
\[
|||w|||_{A_n} \le |||w|||_{\mathcal{A}_n} \le C\, |||w|||_{A_n}, \qquad \forall w \in V_{h,n}. \qquad (4.43)
\]
By restricting the functions in Lemma 4.3 to the discrete subspaces, we may use the above norm equivalences to obtain a discrete continuity result, which we present as the following corollary:

Corollary 4.1 (Discrete continuity of $A_{n-1,n}$). Let the bilinear form $A_{n-1,n}$ and the spatial energy norm $|||\cdot|||_{A_n}$ be defined by (4.30) and (4.10), respectively. Then there exists a constant $C > 0$ such that
\[
A_{n-1,n}(v, w) \le C\, |||v|||_{A_{n-1}}\, |||w|||_{A_n}, \qquad \forall v \in V_{h,n-1} \text{ and } \forall w \in V_{h,n}. \qquad (4.44)
\]
We are now ready to move on to the shift operator.

Definition 4.1 (Shift operators).
For every time $t_{n-1}$, where $n = 1, \dots, N$, we define the two shift operators $S^+_{n-1} : V_{h,n-1} \to V_{h,n}$ and $S^-_{n-1} : V_{h,n} \to V_{h,n-1}$ by
\[
A_n(S^+_{n-1} v, w) = A_{n-1,n}(v, w), \qquad \forall w \in V_{h,n}, \qquad (4.45)
\]
\[
A_{n-1}(v, S^-_{n-1} w) = A_{n-1,n}(v, w), \qquad \forall v \in V_{h,n-1}. \qquad (4.46)
\]
The forward-in-time shift operator $S^+_{n-1}$ is the main one used in the analysis, and we write $S_n = S^+_{n-1}$ for brevity. For all the results we present and prove for $S_n$, there are corresponding ones for $S^-_{n-1}$ that are proven in an analogous way.

For $v \in V_{h,n-1}$, using $S_n v \in V_{h,n}$, the discrete coercivity of $A_n$, the definition of $S_n$, and the discrete continuity of $A_{n-1,n}$, we get that
\[
\alpha\, |||S_n v|||^2_{A_n} \overset{(4.11)}{\le} A_n(S_n v, S_n v) \overset{(4.45)}{=} A_{n-1,n}(v, S_n v) \overset{(4.44)}{\le} C\, |||v|||_{A_{n-1}}\, |||S_n v|||_{A_n}. \qquad (4.47)
\]
From (4.47) we obtain the following stability of the shift operator:
\[
|||S_n v|||_{A_n} \le C\, |||v|||_{A_{n-1}}, \qquad \forall v \in V_{h,n-1}. \qquad (4.48)
\]
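Two small remarks, made explicit here for later reference (they follow from what is already stated). First, $S^+_{n-1} v$ is well defined: for fixed $v$, (4.45) is a square linear system on the finite-dimensional space $V_{h,n}$, and the coercivity (4.11) of $A_n$ makes the system matrix invertible, so the solution exists and is unique; the same argument applies to $S^-_{n-1}$ via $A_{n-1}$. Second, (4.47) shows that the constant in (4.48) may be taken as $C/\alpha$, with $C$ from (4.44) and $\alpha$ from (4.11):
\[
|||S_n v|||_{A_n} \le \frac{C}{\alpha}\, |||v|||_{A_{n-1}}, \qquad \forall v \in V_{h,n-1}.
\]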
The shift operator has two approximability properties that are essential for its application in the analysis. We present and prove these properties in the following two lemmas:

Lemma 4.4 (An estimate for the shift error). Let the shift operator $S_n = S^+_{n-1}$ and the spatial energy norm $|||\cdot|||_{A_{n-1}}$ be defined by (4.45) and (4.10), respectively. Then there exists a constant $C > 0$ such that
\[
\|v - S_n v\|_{\Omega_0} \le C h\, |||v|||_{A_{n-1}}, \qquad \forall v \in V_{h,n-1}. \qquad (4.49)
\]

Proof. The proof is based on the proof of Lemma 4.2, i.e., the estimates for the Ritz projection error, but involves a few modifications. Let $\delta = v - S_n v$ denote the shift error. We consider the auxiliary problem: Find $\varphi \in H^2(\Omega_0) \cap H^1_0(\Omega_0)$ such that
\[
-\Delta \varphi = \delta \quad \text{in } \Omega_0. \qquad (4.50)
\]
We note that $\nabla \varphi \in H^1(\Omega_0)$ from $\varphi \in H^2(\Omega_0)$, which means that $\nabla \varphi|_{\Gamma_n \cup \Gamma_{n-1}} \in L^2(\Gamma_n \cup \Gamma_{n-1})$. Thus $[\partial_n \varphi]|_{\Gamma_n \cup \Gamma_{n-1}} = 0$ in $L^2(\Gamma_n \cup \Gamma_{n-1})$. We denote by $I_{h,n} = I_{h,p=1,n}$ the spatial interpolation operator $I_{h,t}$, defined by (B.1), for $p = 1$ and $t = t_n$, and note that $I_{h,n} \varphi \in V_{h,n}$. The square of the left-hand side of (4.49) is
\[
\|v - S_n v\|^2_{\Omega_0} = (\delta, \delta)_{\Omega_0} \overset{(4.50)}{=} (-\Delta \varphi, \delta)_{\Omega_0} = (-\Delta \varphi, v)_{\Omega_0} - (-\Delta \varphi, S_n v)_{\Omega_0}
\]
\[
\overset{(A.7),(A.12)}{=} \sum_{i=1}^{2} (\nabla \varphi, \nabla v)_{\Omega_{i,n-1}} - (\langle \partial_n \varphi \rangle, [v])_{\Gamma_{n-1}} - A_n(\varphi, S_n v)
\overset{\text{5th}}{=} A_{n-1,n}(v, \varphi) - A_n(\varphi, S_n v) \pm A_{n-1,n}(v, I_{h,n} \varphi)
\]
\[
\overset{(4.45)}{=} A_{n-1,n}(v, \varphi - I_{h,n} \varphi) - A_n(S_n v, \varphi - I_{h,n} \varphi)
\overset{(4.33),(4.42)}{\le} C\, |||v|||_{A_{n-1}}\, |||\varphi - I_{h,n} \varphi|||_{A_n} + C\, |||S_n v|||_{A_n}\, |||\varphi - I_{h,n} \varphi|||_{A_n}
\]
\[
\overset{(B.9),(B.2)}{\le} C\, |||v|||_{A_{n-1}} \big( C h \|D_x^{2} \varphi\|_{\Omega_0} \big) + C\, |||S_n v|||_{A_n} \big( C h \|D_x^{2} \varphi\|_{\Omega_0} \big)
\overset{\text{9th}}{\le} C h\, |||v|||_{A_{n-1}} \|\Delta \varphi\|_{\Omega_0} + C h\, |||v|||_{A_{n-1}} \|\Delta \varphi\|_{\Omega_0}
\overset{(4.50)}{=} C h\, |||v|||_{A_{n-1}} \|\delta\|_{\Omega_0}, \qquad (4.51)
\]
where we have used that $[\varphi]|_{\Gamma_n} = 0$ to go to $A_{n-1,n}$ in the fifth step. In the ninth step, we have used elliptic regularity on $H^2(\Omega_0) \cap H^1_0(\Omega_0)$ for $\varphi$, and the stability of $S_n$ given by (4.48). Dividing both sides of (4.51) by a factor $\|\delta\|_{\Omega_0} = \|v - S_n v\|_{\Omega_0}$ gives (4.49).

Lemma 4.5 (An estimate for the shift energy). Let the bilinear form $A_n$ be defined by (4.2), the shift operator $S_n = S^+_{n-1}$ by (4.45), the discrete Laplacian $\Delta_n$ by (4.25), and the spatial energy norm $|||\cdot|||_{A_n}$ by (4.10).
Then there exists a constant $C > 0$ such that
\[
A_{n-1}(v, v) - A_n(S_n v, S_n v) \le C h\, |||v|||_{A_{n-1}} \|\Delta_{n-1} v\|_{\Omega_0}, \qquad \forall v \in V_{h,n-1}. \qquad (4.52)
\]

Proof. The left-hand side of (4.52) is
\[
A_{n-1}(v, v) - A_n(S_n v, S_n v) \overset{(4.45)}{=} A_{n-1}(v, v) - A_{n-1,n}(v, S_n v) \overset{(4.46)}{=} A_{n-1}(v, v) - A_{n-1}(v, S^-_{n-1} S_n v) = A_{n-1}(v, \underbrace{v - S^-_{n-1} S_n v}_{\in V_{h,n-1}})
\]
\[
\overset{(4.25)}{=} (-\Delta_{n-1} v, v - S^-_{n-1} S_n v)_{\Omega_0} \le \|\Delta_{n-1} v\|_{\Omega_0} \|v - S^-_{n-1} S_n v \pm S_n v\|_{\Omega_0}
\le \|\Delta_{n-1} v\|_{\Omega_0} \big( \|v - S_n v\|_{\Omega_0} + \|S_n v - S^-_{n-1} S_n v\|_{\Omega_0} \big)
\]
\[
\overset{\text{7th}}{\le} \|\Delta_{n-1} v\|_{\Omega_0} \big( C h\, |||v|||_{A_{n-1}} + C h\, |||S_n v|||_{A_n} \big)
\overset{(4.48)}{\le} \|\Delta_{n-1} v\|_{\Omega_0} \big( C h\, |||v|||_{A_{n-1}} + C h\, |||v|||_{A_{n-1}} \big)
= C h \|\Delta_{n-1} v\|_{\Omega_0}\, |||v|||_{A_{n-1}}, \qquad (4.53)
\]
where, in the seventh step, we have used the estimate for the shift error given by (4.49) for $S_n$ and a corresponding result for $S^-_{n-1}$.

By switching the order of the terms on the left-hand side of (4.52), and following the same steps as in the proof of Lemma 4.5, we may obtain the same estimate. Moving the term without the shift operator to the right-hand side gives us a result that we present as the following corollary:

Corollary 4.2 (A stability result for the shift operator).
Let the bilinear form $A_n$ be defined by (4.2), the shift operator $S_n = S^+_{n-1}$ by (4.45), the discrete Laplacian $\Delta_n$ by (4.25), and the spatial energy norm $|||\cdot|||_{A_n}$ by (4.10). Then there exists a constant $C > 0$ such that
\[
A_n(S_n v, S_n v) \le A_{n-1}(v, v) + C h\, |||v|||_{A_{n-1}} \|\Delta_{n-1} v\|_{\Omega_0}, \qquad \forall v \in V_{h,n-1}. \qquad (4.54)
\]

4.2 The bilinear form $B_h$

For $k \in \mathbb{N}$ we define the broken Bochner–Sobolev spaces
\[
H^k(\{I_n\}_{n=1}^{N}; L^2(\Omega_0)) := \{ v \in L^2((0, T]; L^2(\Omega_0)) : v|_{S_{0,n}} \in H^k(I_n; L^2(\Omega_0)) \text{ for } n = 1, \dots, N \}. \qquad (4.55)
\]
We define the non-symmetric bilinear form $B_h$ on $H^1(\{I_n\}_{n=1}^{N}; L^2(\Omega_0)) \cap L^2((0, T]; H^1(\cup_i \Omega_i(t)))$ by
\[
B_h(w, v) := \sum_{n=1}^{N} \int_{I_n} (\dot w, v)_{\Omega_0}\,dt + \sum_{n=1}^{N} \int_{I_n} A_{h,t}(w, v)\,dt + \sum_{n=1}^{N-1} ([w]_n, v^+_n)_{\Omega_0} + (w^+_0, v^+_0)_{\Omega_0}. \qquad (4.56)
\]
We may then write (3.17) in compact form as: Find $u_h \in V_h$ such that
\[
B_h(u_h, v) = (u_0, v^+_0)_{\Omega_0} + \int_0^T (f, v)_{\Omega_0}\,dt, \qquad (4.57)
\]
for all $v \in V_h$. By partially integrating the first term in (4.56), the bilinear form $B_h$ can be expressed differently, as noted in the following lemma:

Lemma 4.6 (Temporal partial integration in $B_h$). The bilinear form $B_h$, defined in (4.56), can be written as
\[
B_h(w, v) = \sum_{n=1}^{N} \int_{I_n} (w, -\dot v)_{\Omega_0}\,dt + \sum_{n=1}^{N} \int_{I_n} A_{h,t}(w, v)\,dt + \sum_{n=1}^{N-1} (w^-_n, -[v]_n)_{\Omega_0} + (w^-_N, v^-_N)_{\Omega_0}. \qquad (4.58)
\]

Proof. The first term in (4.56) is
\[
\sum_{n=1}^{N} \int_{I_n} (\dot w, v)_{\Omega_0}\,dt = \sum_{n=1}^{N} \int_{I_n} \int_{\Omega_0} \dot w v\,dx\,dt = \sum_{n=1}^{N} \int_{\Omega_0} \Big( \int_{I_n} \dot w v\,dt \Big) dx = \sum_{n=1}^{N} \int_{\Omega_0} \Big( \big[ w v \big]_{I_n} - \int_{I_n} w \dot v\,dt \Big) dx
\]
\[
= \sum_{n=1}^{N} \int_{\Omega_0} \big( w^-_n v^-_n - w^+_{n-1} v^+_{n-1} \big) dx + \sum_{n=1}^{N} \int_{\Omega_0} \int_{I_n} -w \dot v\,dt\,dx
= \sum_{n=1}^{N} \big( (w^-_n, v^-_n)_{\Omega_0} - (w^+_{n-1}, v^+_{n-1})_{\Omega_0} \big) + \sum_{n=1}^{N} \int_{I_n} (w, -\dot v)_{\Omega_0}\,dt, \qquad (4.59)
\]
where the second term in the last row is as we want it. We combine the first term in the last row of (4.59) with the third and fourth terms in (4.56) to yield
\[
\sum_{n=1}^{N} \big( (w^-_n, v^-_n)_{\Omega_0} - (w^+_{n-1}, v^+_{n-1})_{\Omega_0} \big) + \sum_{n=1}^{N-1} ([w]_n, v^+_n)_{\Omega_0} + (w^+_0, v^+_0)_{\Omega_0}
\]
\[
= \sum_{n=1}^{N-1} \big( (w^-_n, v^-_n)_{\Omega_0} - (w^+_{n-1}, v^+_{n-1})_{\Omega_0} + (w^+_n, v^+_n)_{\Omega_0} - (w^-_n, v^+_n)_{\Omega_0} \big) + (w^-_N, v^-_N)_{\Omega_0} - (w^+_{N-1}, v^+_{N-1})_{\Omega_0} + (w^+_0, v^+_0)_{\Omega_0}
\]
\[
= \sum_{n=1}^{N-1} \big( (w^-_n, v^-_n)_{\Omega_0} - (w^-_n, v^+_n)_{\Omega_0} \big) + \underbrace{(w^+_0, v^+_0)_{\Omega_0} - (w^+_0, v^+_0)_{\Omega_0}}_{=\,0} + (w^-_N, v^-_N)_{\Omega_0} + \underbrace{(w^+_{N-1}, v^+_{N-1})_{\Omega_0} - (w^+_{N-1}, v^+_{N-1})_{\Omega_0}}_{=\,0}
\]
\[
= \sum_{n=1}^{N-1} (w^-_n, -[v]_n)_{\Omega_0} + (w^-_N, v^-_N)_{\Omega_0}. \qquad (4.60)
\]
Using the identities (4.59) and (4.60) in (4.56) gives (4.58).
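As a quick sanity check (an illustration added here, not part of the original proof), take $N = 2$ in (4.60). The left-hand side then reads
\[
(w^-_1, v^-_1)_{\Omega_0} - (w^+_0, v^+_0)_{\Omega_0} + (w^-_2, v^-_2)_{\Omega_0} - (w^+_1, v^+_1)_{\Omega_0} + \big( (w^+_1, v^+_1)_{\Omega_0} - (w^-_1, v^+_1)_{\Omega_0} \big) + (w^+_0, v^+_0)_{\Omega_0}
= (w^-_1, -[v]_1)_{\Omega_0} + (w^-_2, v^-_2)_{\Omega_0},
\]
which is exactly the right-hand side of (4.60) with $N = 2$.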
4.3 Consistency and Galerkin orthogonality

To show Galerkin orthogonality for the bilinear form $B_h$, we need the following lemma on consistency:

Lemma 4.7 (Consistency). The solution $u$ to problem (2.5) also solves (3.17).

Proof. Insert $u$ in place of $u_h$ in the expression on the left-hand side of (3.17). From the regularity of $u$, we have, for $n = 1, \dots, N$, $[u]_{n-1} = 0$, $[u] = 0$ and $[\nabla u] = 0$. Writing $\sum_{i,n} = \sum_{i=1}^{2} \sum_{n=1}^{N}$, the left-hand side of (3.17) with $u$ becomes
\[
\sum_{n=1}^{N} \int_{I_n} (\dot u, v)_{\Omega_0}\,dt + \sum_{i,n} \int_{I_n} (\nabla u, \nabla v)_{\Omega_{i,n}}\,dt + \sum_{n=1}^{N} \int_{\bar\Gamma_n} -\langle \partial_n u \rangle [v]\,d\bar s. \qquad (4.61)
\]
The second term in (4.61) is
\[
\begin{aligned}
\sum_{i,n} \int_{I_n} (\nabla u, \nabla v)_{\Omega_{i,n}} \, dt
&= \sum_{i,n} \int_{I_n} \Big( \int_{\Omega_{i,n}} \nabla u \cdot \nabla v \, dx \Big) dt \\
&= \sum_{i,n} \int_{I_n} \Big( \int_{\Omega_{i,n}} -\Delta u \, v \, dx + \int_{\partial\Omega_{i,n}} (n \cdot \nabla u \, v)_i \, ds \Big) dt \\
&= \sum_{i,n} \int_{I_n} (-\Delta u, v)_{\Omega_{i,n}} \, dt + \sum_{i,n} \int_{I_n} \int_{\partial\Omega_{i,n}} (n \cdot \nabla u \, v)_i \, ds \, dt, \qquad (4.62)
\end{aligned}
\]
where $n_i$ is the outward pointing normal vector to $\partial\Omega_{i,n}$. We leave the first term in the last row of (4.62) as it is and consider the second term:
\[
\begin{aligned}
\sum_{i,n} \int_{I_n} \int_{\partial\Omega_{i,n}} (n \cdot \nabla u \, v)_i \, ds \, dt
&= \sum_{i,n} \int_{I_n} \Big( \underbrace{\int_{\partial\Omega_{i,n} \cap \partial\Omega_0} (n \cdot \nabla u \, v)_i \, ds}_{=\,0,\ \text{since } v = 0 \text{ on } \partial\Omega_0} + \int_{\Gamma_n} (n \cdot \nabla u \, v)_i \, ds \Big) dt \\
&= \sum_{i,n} \int_{I_n} \int_{\Gamma_n} (n \cdot \nabla u \, v)_i \, ds \, dt
= \sum_{n=1}^{N} \sum_{i=1}^{2} \int_{\bar\Gamma_n} (n \cdot \nabla u \, v)_i \, d\bar s \\
&= \sum_{n=1}^{N} \int_{\bar\Gamma_n} n_1 \cdot \nabla u_1 v_1 + n_2 \cdot \nabla u_2 v_2 \, d\bar s
= \sum_{n=1}^{N} \int_{\bar\Gamma_n} n \cdot [\nabla u \, v] \, d\bar s
= \sum_{n=1}^{N} \int_{\bar\Gamma_n} [(\partial_n u) v] \, d\bar s \\
&= \sum_{n=1}^{N} \int_{\bar\Gamma_n} [\partial_n u]\langle v\rangle + \langle \partial_n u\rangle [v] + (\omega_2 - \omega_1)[\partial_n u][v] \, d\bar s
= \sum_{n=1}^{N} \int_{\bar\Gamma_n} \langle \partial_n u\rangle [v] \, d\bar s, \qquad (4.63)
\end{aligned}
\]
where we have taken $n = n_1 = -n_2$ and $[v] = v_1 - v_2$ to obtain the fifth equality, applied (A.1) to get the seventh equality and, to obtain the last equality, used $[\partial_n u] = 0$, which follows from the regularity of $u$. Using the identities (4.62) and (4.63), we have that (4.61), i.e., the left-hand side of (3.17) with $u$ instead of $u_h$, is
\[
\begin{aligned}
&\sum_{n=1}^{N} \int_{I_n} (\dot u, v)_{\Omega_0} \, dt + \sum_{i,n} \int_{I_n} (\nabla u, \nabla v)_{\Omega_{i,n}} \, dt + \sum_{n=1}^{N} \int_{\bar\Gamma_n} -\langle \partial_n u\rangle [v] \, d\bar s \\
&\quad = \sum_{n=1}^{N} \int_{I_n} (\dot u, v)_{\Omega_0} \, dt + \sum_{i,n} \int_{I_n} (-\Delta u, v)_{\Omega_{i,n}} \, dt + \sum_{n=1}^{N} \int_{\bar\Gamma_n} \underbrace{\langle \partial_n u\rangle [v] - \langle \partial_n u\rangle [v]}_{=\,0} \, d\bar s \\
&\quad = \sum_{i,n} \int_{I_n} (\dot u - \Delta u, v)_{\Omega_{i,n}} \, dt
= \int_0^T (\dot u - \Delta u, v)_{\Omega_0} \, dt
\overset{(2.5)}{=} \int_0^T (f, v)_{\Omega_0} \, dt, \qquad (4.64)
\end{aligned}
\]
which is the right-hand side of (3.17). This completes the proof.

From Lemma 4.7, we have that $u$ solves (3.17). Since (4.57) is just another way of writing (3.17), $u$ solves (4.57) as well. From this we may obtain a Galerkin orthogonality, which we present as the following corollary:

Corollary 4.3 (Galerkin orthogonality). Let the bilinear form $B_h$ be defined by (4.56), and let $u$ and $u_h$ be the solutions of (2.5) and (3.17), respectively. Then
\[
B_h(u - u_h, v) = 0, \quad \forall v \in V_h. \qquad (4.65)
\]
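The corollary follows by the standard two-line argument; a minimal sketch, using only that $u$ satisfies (4.57) by Lemma 4.7 and that $u_h$ solves (3.17), both tested against discrete test functions:
\[
B_h(u, v) = \int_0^T (f, v)_{\Omega_0} \, dt = B_h(u_h, v) \quad \forall v \in V_h
\quad \Longrightarrow \quad B_h(u - u_h, v) = 0.
\]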
4.4 A discrete dual problem

We now consider the function $z_h \in V_h$ defined by
\[
B_h(v, z_h) = (v_N^-, z_{h,N}^+)_{\Omega_0}, \quad \forall v \in V_h. \qquad (4.66)
\]
From (4.66), the function $z_h$ is the solution of a discrete dual problem to (2.5). With the alternative way of expressing $B_h$ from Lemma 4.6, we may write (4.66) as the following discrete dual problem that goes backwards in time: Find $z_h \in V_h$ such that
\[
\sum_{n=1}^{N} \int_{I_n} (v, -\dot z_h)_{\Omega_0} \, dt + \sum_{n=1}^{N} \int_{I_n} A_{h,t}(v, z_h) \, dt + \sum_{n=1}^{N-1} (v_n^-, -[z_h]_n)_{\Omega_0} + (v_N^-, z_{h,N}^-)_{\Omega_0} = (v_N^-, z_{h,N}^+)_{\Omega_0}, \qquad (4.67)
\]
for all $v \in V_h$. Thus, we may consider $z_h$ to be the finite element solution of the following continuous dual problem:
\[
\begin{cases}
-\dot z - \Delta z = 0 & \text{in } \Omega_0 \times [0, T), \\
z = 0 & \text{on } \partial\Omega_0 \times [0, T), \\
z = z_{h,N}^+ & \text{in } \Omega_0 \times \{T\}.
\end{cases} \qquad (4.68)
\]
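As a side remark (not part of the original argument), the phrase "backwards in time" can be made concrete by the substitution $s = T - t$: if $z$ solves (4.68) and we set $w(x, s) = z(x, T - s)$, then $w$ solves the ordinary forward heat equation
\[
\dot w - \Delta w = 0 \ \text{in } \Omega_0 \times (0, T], \qquad
w = 0 \ \text{on } \partial\Omega_0 \times (0, T], \qquad
w(\cdot, 0) = z_{h,N}^+ \ \text{in } \Omega_0,
\]
so the terminal value $z_{h,N}^+$ plays the role of an initial value.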
5 Stability analysis

The stability analysis in this section is based on a stability analysis for the case with only a background mesh, presented by Eriksson and Johnson in [12, 13]. Due to the CutFEM setting, the original analysis has been slightly modified by the incorporation of the shift operator defined by (4.45). The main result of this section is the following stability estimate and its counterpart for the discrete dual problem:

Theorem 5.1 (The main stability estimate). Let $u_h$ be the solution of (3.17) with $f \equiv 0$ and let $u_0$ be the initial value of the analytic solution of the problem presented in Section 2. Then we have that
\[
\|u_{h,N}^-\|_{\Omega_0} + \sum_{n=1}^{N} \int_{I_n} \|\dot u_h\|_{\Omega_0} + \|\Delta_n u_h\|_{\Omega_0} \, dt + \sum_{n=1}^{N} \|[u_h]_{n-1}\|_{\Omega_0} \leq C_1 \|u_0\|_{\Omega_0}, \qquad (5.1)
\]
where $C_1 = C(\log(t_N/k_1) + 1)^{1/2}$ and $C > 0$ is a constant.

The counterpart of (5.1) for $z_h$ is a crucial tool in the proof of the a priori error estimate presented in Theorem 6.1 in Section 6. For the purpose of that application, we replace the initial time jump term. From (5.1), we have that $\|[u_h]_0\|_{\Omega_0} \leq C_1 \|u_0\|_{\Omega_0}$. The corresponding inequality for $z_h$ is $\|[z_h]_N\|_{\Omega_0} \leq C_N \|z_{h,N}^+\|_{\Omega_0}$, where $C_N = C(\log(t_N/k_N) + 1)^{1/2}$ and $C > 0$. Squaring both sides of this inequality gives us
\[
\begin{aligned}
C_N^2 \|z_{h,N}^+\|_{\Omega_0}^2 \geq \|[z_h]_N\|_{\Omega_0}^2 &= ([z_h]_N, [z_h]_N)_{\Omega_0} = (z_{h,N}^+ - z_{h,N}^-, z_{h,N}^+ - z_{h,N}^-)_{\Omega_0} \\
&= \underbrace{\|z_{h,N}^+\|_{\Omega_0}^2}_{\geq 0} - 2(z_{h,N}^+, z_{h,N}^-)_{\Omega_0} + \|z_{h,N}^-\|_{\Omega_0}^2
\geq \|z_{h,N}^-\|_{\Omega_0}^2 - 2(z_{h,N}^+, z_{h,N}^-)_{\Omega_0}, \qquad (5.2)
\end{aligned}
\]
from which we get
\[
\begin{aligned}
\|z_{h,N}^-\|_{\Omega_0}^2 &\leq C_N^2 \|z_{h,N}^+\|_{\Omega_0}^2 + 2(z_{h,N}^+, z_{h,N}^-)_{\Omega_0}
\leq C_N^2 \|z_{h,N}^+\|_{\Omega_0}^2 + 2\|z_{h,N}^+\|_{\Omega_0} \|z_{h,N}^-\|_{\Omega_0} \\
&\leq C_N^2 \|z_{h,N}^+\|_{\Omega_0}^2 + 2\|z_{h,N}^+\|_{\Omega_0}^2 + \tfrac{1}{2}\|z_{h,N}^-\|_{\Omega_0}^2,
\quad \Longrightarrow \quad \|z_{h,N}^-\|_{\Omega_0} \leq C_N \|z_{h,N}^+\|_{\Omega_0}. \qquad (5.3)
\end{aligned}
\]
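The implication in (5.3) is a kick-back step; written out (with constants absorbed as elsewhere in this section), subtracting $\tfrac{1}{2}\|z_{h,N}^-\|_{\Omega_0}^2$ from both sides gives
\[
\tfrac{1}{2}\|z_{h,N}^-\|_{\Omega_0}^2 \leq (C_N^2 + 2)\,\|z_{h,N}^+\|_{\Omega_0}^2,
\]
and since $C_N$ is bounded below by a positive constant, the right-hand side is at most a constant times $C_N^2 \|z_{h,N}^+\|_{\Omega_0}^2$, which is the stated bound with an updated constant $C$ inside $C_N$.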
We use this result in the corresponding stability estimate for $z_h$, which we present as the following corollary:

Corollary 5.1 (A stability estimate for $z_h$). A corresponding stability estimate to (5.1) for the finite element solution $z_h$ to the discrete dual problem (4.67) is
\[
\|z_{h,0}^+\|_{\Omega_0} + \sum_{n=1}^{N} \int_{I_n} \|\dot z_h\|_{\Omega_0} + \|\Delta_n z_h\|_{\Omega_0} \, dt + \sum_{n=1}^{N-1} \|[z_h]_n\|_{\Omega_0} + \|z_{h,N}^-\|_{\Omega_0} \leq C_N \|z_{h,N}^+\|_{\Omega_0}, \qquad (5.4)
\]
where $C_N = C(\log(t_N/k_N) + 1)^{1/2}$ and $C > 0$ is a constant.

To prove Theorem 5.1 and thus also Corollary 5.1, we need two other stability estimates for the finite element problem (3.17). We start by letting $f \equiv 0$ in (4.5). We have: Find $u_h \in V_h$ such that
\[
\sum_{n=1}^{N} \int_{I_n} (\dot u_h, v)_{\Omega_0} \, dt + \sum_{n=1}^{N} \int_{I_n} A_n(u_h, v) \, dt + \sum_{n=1}^{N} ([u_h]_{n-1}, v_{n-1}^+)_{\Omega_0} = 0, \qquad (5.5)
\]
for all $v \in V_h$.

5.1 The basic stability estimate

The first of the two auxiliary stability estimates is presented as the following lemma:

Lemma 5.1 (The basic stability estimate). Let $u_h$ be the solution of (3.17) with $f \equiv 0$ and let $u_0$ be the initial value of the analytic solution of the problem presented in Section 2. Then there exists a constant $C > 0$ such that
\[
\|u_{h,N}^-\|_{\Omega_0}^2 + \sum_{n=1}^{N} \int_{I_n} |||u_h|||_{A_n}^2 \, dt + \sum_{n=1}^{N} \|[u_h]_{n-1}\|_{\Omega_0}^2 \leq C \|u_0\|_{\Omega_0}^2. \qquad (5.6)
\]

Proof. By taking $v = 2u_h \in V_h$ in (5.5), we have
\[
\underbrace{\sum_{n=1}^{N} \int_{I_n} 2(\dot u_h, u_h)_{\Omega_0} \, dt}_{=\,\mathrm{I}} + \underbrace{2 \sum_{n=1}^{N} \int_{I_n} A_n(u_h, u_h) \, dt}_{=\,\mathrm{II}} + \underbrace{\sum_{n=1}^{N} 2([u_h]_{n-1}, u_{h,n-1}^+)_{\Omega_0}}_{=\,\mathrm{III}} = 0. \qquad (5.7)
\]
We consider the terms in (5.7) separately, starting with the first:
\[
\begin{aligned}
\mathrm{I} = \sum_{n=1}^{N} \int_{I_n} 2(\dot u_h, u_h)_{\Omega_0} \, dt
&= \sum_{n=1}^{N} \int_{I_n} \int_{\Omega_0} 2 \dot u_h u_h \, dx \, dt
= \sum_{n=1}^{N} \int_{I_n} \int_{\Omega_0} \partial_t(u_h^2) \, dx \, dt \\
&= \sum_{n=1}^{N} \int_{I_n} \partial_t \Big( \int_{\Omega_0} u_h^2 \, dx \Big) dt
= \sum_{n=1}^{N} \int_{I_n} \partial_t \|u_h\|_{\Omega_0}^2 \, dt
= \sum_{n=1}^{N} \Big( \|u_{h,n}^-\|_{\Omega_0}^2 - \|u_{h,n-1}^+\|_{\Omega_0}^2 \Big). \qquad (5.8)
\end{aligned}
\]
For the treatment of the second term in (5.7), we note that we may apply Lemma 4.1, since $u_h(\cdot, t) \in V_h(t)$ for any $t \in (0, T]$. We thus have
\[
\mathrm{II} = 2 \sum_{n=1}^{N} \int_{I_n} A_n(u_h, u_h) \, dt
\overset{(4.11)}{\geq} 2 \sum_{n=1}^{N} \int_{I_n} \alpha_t \, |||u_h|||_{A_n}^2 \, dt
\geq 2 \min_{t \in (0,T]} \{\alpha_t\} \sum_{n=1}^{N} \int_{I_n} |||u_h|||_{A_n}^2 \, dt. \qquad (5.9)
\]
We move on to the third term. We note that for $n = 1, \dots, N$, we have from the algebraic identity $(a - b)^2 = a^2 - 2ab + b^2$ that
\[
\begin{aligned}
2([u_h]_{n-1}, u_{h,n-1}^+)_{\Omega_0} &= \|[u_h]_{n-1}\|_{\Omega_0}^2 + \|u_{h,n-1}^+\|_{\Omega_0}^2 - \|[u_h]_{n-1} - u_{h,n-1}^+\|_{\Omega_0}^2 \\
&= \|[u_h]_{n-1}\|_{\Omega_0}^2 + \|u_{h,n-1}^+\|_{\Omega_0}^2 - \|u_{h,n-1}^-\|_{\Omega_0}^2. \qquad (5.10)
\end{aligned}
\]
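For clarity, the last equality in (5.10) uses that, with $a = [u_h]_{n-1}$ and $b = u_{h,n-1}^+$ in the algebraic identity,
\[
[u_h]_{n-1} - u_{h,n-1}^+ = (u_{h,n-1}^+ - u_{h,n-1}^-) - u_{h,n-1}^+ = -u_{h,n-1}^-,
\]
so that $\|[u_h]_{n-1} - u_{h,n-1}^+\|_{\Omega_0} = \|u_{h,n-1}^-\|_{\Omega_0}$.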
With (5.10), the third term in (5.7) is
\[
\mathrm{III} = \sum_{n=1}^{N} 2([u_h]_{n-1}, u_{h,n-1}^+)_{\Omega_0}
= \sum_{n=1}^{N} \|[u_h]_{n-1}\|_{\Omega_0}^2 + \sum_{n=1}^{N} \Big( \|u_{h,n-1}^+\|_{\Omega_0}^2 - \|u_{h,n-1}^-\|_{\Omega_0}^2 \Big). \qquad (5.11)
\]
Now we add the second term on the right-hand side of (5.11) to the right-hand side of (5.8) to obtain
\[
\begin{aligned}
&\sum_{n=1}^{N} \Big( \|u_{h,n}^-\|_{\Omega_0}^2 - \|u_{h,n-1}^+\|_{\Omega_0}^2 \Big) + \sum_{n=1}^{N} \Big( \|u_{h,n-1}^+\|_{\Omega_0}^2 - \|u_{h,n-1}^-\|_{\Omega_0}^2 \Big) \\
&\quad = \sum_{n=1}^{N} \Big( \|u_{h,n}^-\|_{\Omega_0}^2 \underbrace{- \|u_{h,n-1}^+\|_{\Omega_0}^2 + \|u_{h,n-1}^+\|_{\Omega_0}^2}_{=\,0} - \|u_{h,n-1}^-\|_{\Omega_0}^2 \Big) \\
&\quad = \sum_{n=1}^{N-1} \Big( \underbrace{\|u_{h,n}^-\|_{\Omega_0}^2 - \|u_{h,n}^-\|_{\Omega_0}^2}_{=\,0} \Big) + \Big( \|u_{h,N}^-\|_{\Omega_0}^2 - \|u_{h,0}^-\|_{\Omega_0}^2 \Big)
= \|u_{h,N}^-\|_{\Omega_0}^2 - \|u_{h,0}^-\|_{\Omega_0}^2. \qquad (5.12)
\end{aligned}
\]
Inserting (5.8), (5.9) and (5.11) into (5.7), and using (5.12), we have
\[
2 \min_{t \in (0,T]} \{\alpha_t\} \sum_{n=1}^{N} \int_{I_n} |||u_h|||_{A_n}^2 \, dt + \sum_{n=1}^{N} \|[u_h]_{n-1}\|_{\Omega_0}^2 + \|u_{h,N}^-\|_{\Omega_0}^2 - \|u_{h,0}^-\|_{\Omega_0}^2 \leq 0, \qquad (5.13)
\]
which implies
\[
\min\Big\{ 2 \min_{t \in (0,T]} \{\alpha_t\},\, 1 \Big\} \Big( \sum_{n=1}^{N} \int_{I_n} |||u_h|||_{A_n}^2 \, dt + \sum_{n=1}^{N} \|[u_h]_{n-1}\|_{\Omega_0}^2 + \|u_{h,N}^-\|_{\Omega_0}^2 \Big)
\leq \|u_{h,0}^-\|_{\Omega_0}^2 = \|P_0 u_0\|_{\Omega_0}^2 \leq \|u_0\|_{\Omega_0}^2, \qquad (5.14)
\]
where we have used that $u_{h,0}^- = P_0 u_0$ in the last identity, where $P_0$ is defined by (4.15). The last inequality follows from the boundedness of $P_0$. Dividing both sides of (5.14) by the min-factor on the left-hand side gives (5.6).
5.2 The strong stability estimate

Lemma 5.2 (The strong stability estimate). Let $u_h$ be the solution of (3.17) with $f \equiv 0$ and let $u_0$ be the initial value of the analytic solution of the problem presented in Section 2. Then there exists a constant $C > 0$ such that
\[
\sum_{n=1}^{N} t_n \int_{I_n} \|\dot u_h\|_{\Omega_0}^2 + \|\Delta_n u_h\|_{\Omega_0}^2 \, dt + \sum_{n=2}^{N} \frac{t_n}{k_n} \|[u_h]_{n-1}\|_{\Omega_0}^2 \leq C \|u_0\|_{\Omega_0}^2. \qquad (5.15)
\]

Proof. By taking $v = -\Delta_n u_h \in V_h$ in (5.5), we have
\[
\underbrace{\int_{I_n} (\dot u_h, -\Delta_n u_h)_{\Omega_0} \, dt}_{=\,\mathrm{I}} + \underbrace{\int_{I_n} A_n(u_h, -\Delta_n u_h) \, dt}_{=\,\mathrm{II}} + \underbrace{([u_h]_{n-1}, (-\Delta_n u_h)_{n-1}^+)_{\Omega_0}}_{=\,\mathrm{III}} = 0. \qquad (5.16)
\]
We consider the terms separately, starting with the first:
\[
\mathrm{I} = \int_{I_n} (\dot u_h, -\Delta_n u_h)_{\Omega_0} \, dt
\overset{(4.25)}{=} \int_{I_n} A_n(\dot u_h, u_h) \, dt
= \int_{I_n} \tfrac{1}{2} \partial_t A_n(u_h, u_h) \, dt
= \tfrac{1}{2} A_n(u_{h,n}^-, u_{h,n}^-) - \tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+). \qquad (5.17)
\]
The second term in (5.16) is
\[
\mathrm{II} = \int_{I_n} A_n(u_h, -\Delta_n u_h) \, dt
\overset{(4.25)}{=} \int_{I_n} (-\Delta_n u_h, -\Delta_n u_h)_{\Omega_0} \, dt
= \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt. \qquad (5.18)
\]
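The identity $\int_{I_n} A_n(\dot u_h, u_h) \, dt = \int_{I_n} \tfrac{1}{2} \partial_t A_n(u_h, u_h) \, dt$ used in (5.17) relies on $A_n$ being bilinear and symmetric and, within the slab $I_n$, independent of $t$ (which is how we read the slab-wise notation here): then
\[
\partial_t A_n(u_h, u_h) = A_n(\dot u_h, u_h) + A_n(u_h, \dot u_h) = 2 A_n(\dot u_h, u_h),
\]
and integrating over $I_n$ leaves exactly the two trace terms in (5.17).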
The third term in (5.16) is
\[
\begin{aligned}
\mathrm{III} = ([u_h]_{n-1}, (-\Delta_n u_h)_{n-1}^+)_{\Omega_0}
&= (u_{h,n-1}^+ - u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0} \\
&= (u_{h,n-1}^+, -\Delta_n u_{h,n-1}^+)_{\Omega_0} - (u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0} \\
&\overset{(4.25)}{=} A_n(u_{h,n-1}^+, u_{h,n-1}^+) - (u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}. \qquad (5.19)
\end{aligned}
\]
Using the identities (5.17), (5.18), and (5.19) in (5.16), we may obtain
\[
\tfrac{1}{2} A_n(u_{h,n}^-, u_{h,n}^-) + \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt + \tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+) = (u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}. \qquad (5.20)
\]
For $n = 1$, the right-hand side of (5.20) is
\[
\begin{aligned}
(u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0} = (u_{h,0}^-, -\Delta_1 u_{h,0}^+)_{\Omega_0}
&= (P_0 u_0, -\Delta_1 u_{h,0}^+)_{\Omega_0}
\leq \|P_0 u_0\|_{\Omega_0} \|\Delta_1 u_{h,0}^+\|_{\Omega_0} \\
&\leq \frac{1}{2\varepsilon k_1} \|P_0 u_0\|_{\Omega_0}^2 + \frac{\varepsilon k_1}{2} \|\Delta_1 u_{h,0}^+\|_{\Omega_0}^2
\leq \frac{1}{2\varepsilon t_1} \|u_0\|_{\Omega_0}^2 + \varepsilon C \int_{I_1} \|\Delta_1 u_h\|_{\Omega_0}^2 \, dt. \qquad (5.21)
\end{aligned}
\]
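The last step in (5.21) appears to combine three ingredients: the $L^2$-stability $\|P_0 u_0\|_{\Omega_0} \leq \|u_0\|_{\Omega_0}$, the identity $k_1 = t_1$ (since $t_0 = 0$), and a temporal inverse estimate of the form
\[
k_1 \|\Delta_1 u_{h,0}^+\|_{\Omega_0}^2 \leq C \int_{I_1} \|\Delta_1 u_h\|_{\Omega_0}^2 \, dt,
\]
which bounds the endpoint value of a polynomial in time by its $L^2(I_1)$-norm.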
For $n = 2, \dots, N$, we would like to use (4.25), i.e., the definition of the discrete Laplacian, on the right-hand side of (5.20), but due to the dG(0) movement of $\mathcal{T}_G$, $u_{h,n-1}^- \notin V_{h,n}$, so we cannot use (4.25) directly. To handle this, we make use of the shift operator $S_n : V_{h,n-1} \to V_{h,n}$, defined by (4.45), as follows:
\[
(u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}
= (u_{h,n-1}^- \pm S_n u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}
= \underbrace{((1 - S_n) u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}}_{=\,\mathrm{RHS.I}} + \underbrace{(S_n u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}}_{=\,\mathrm{RHS.II}}. \qquad (5.22)
\]
We treat the terms separately, starting with the first:
\[
\begin{aligned}
\mathrm{RHS.I} = ((1 - S_n) u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}
&\leq \|(1 - S_n) u_{h,n-1}^-\|_{\Omega_0} \|\Delta_n u_{h,n-1}^+\|_{\Omega_0}
\overset{(4.49)}{\leq} C h \, |||u_{h,n-1}^-|||_{A_{n-1}} \|\Delta_n u_{h,n-1}^+\|_{\Omega_0} \\
&\leq \frac{C}{\varepsilon_1} h \, |||u_{h,n-1}^-|||_{A_{n-1}}^2 + \varepsilon_1 C h \|\Delta_n u_{h,n-1}^+\|_{\Omega_0}^2
\overset{(3.1)}{\leq} \frac{C}{\varepsilon_1} k_{n-1} |||u_{h,n-1}^-|||_{A_{n-1}}^2 + \varepsilon_1 C k_n \|\Delta_n u_{h,n-1}^+\|_{\Omega_0}^2 \\
&\leq \frac{C}{\varepsilon_1} \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + \varepsilon_1 C \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt, \qquad (5.23)
\end{aligned}
\]
where we have used Lemma 4.4, i.e., an estimate for the shift error, to obtain the second inequality, and the spatiotemporal quasi-uniformity to obtain the fourth.
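The third inequality in (5.23) is Young's inequality, $ab \leq \tfrac{1}{2\varepsilon_1} a^2 + \tfrac{\varepsilon_1}{2} b^2$, applied with $a = |||u_{h,n-1}^-|||_{A_{n-1}}$ and $b = \|\Delta_n u_{h,n-1}^+\|_{\Omega_0}$, the harmless factors of $\tfrac{1}{2}$ being absorbed into the constant $C$.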
We consider the second term in (5.22). For the sake of brevity we write $S = S_n$, $U = u_{h,n-1}$ and thus have
\[
\mathrm{RHS.II} = (\underbrace{S_n u_{h,n-1}^-}_{\in V_{h,n}}, -\Delta_n u_{h,n-1}^+)_{\Omega_0}
\overset{(4.25)}{=} A_n(S_n u_{h,n-1}^-, u_{h,n-1}^+) = A_n(S U^-, U^+). \qquad (5.24)
\]
From the algebraic identity $(a - b)^2 = a^2 - 2ab + b^2$ we have that
\[
2 A_n(S U^-, U^+) = A_n(S U^-, S U^-) + A_n(U^+, U^+) - \underbrace{A_n(S U^- - U^+, S U^- - U^+)}_{\geq 0 \text{ from } (4.11)}
\leq A_n(S U^-, S U^-) + A_n(U^+, U^+), \qquad (5.25)
\]
where we have used Lemma 4.1, i.e., the discrete coercivity of $A_{h,t}$, to obtain the inequality. Dividing both sides of (5.25) by 2, we get that
\[
\begin{aligned}
\mathrm{RHS.II} = A_n(S_n u_{h,n-1}^-, u_{h,n-1}^+)
&\leq \tfrac{1}{2} A_n(S_n u_{h,n-1}^-, S_n u_{h,n-1}^-) + \tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+) \\
&\overset{(4.54)}{\leq} \tfrac{1}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-) + C h \, |||u_{h,n-1}^-|||_{A_{n-1}} \|\Delta_{n-1} u_{h,n-1}^-\|_{\Omega_0} + \tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+) \\
&\overset{(5.23)}{\leq} \tfrac{1}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-) + \tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+)
+ \frac{C}{\varepsilon_2} \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + \varepsilon_2 C \int_{I_{n-1}} \|\Delta_{n-1} u_h\|_{\Omega_0}^2 \, dt, \qquad (5.26)
\end{aligned}
\]
where we have used Corollary 4.2, i.e., the stability result for $S_n$, in the third step, and then treated the resulting middle term by following the steps in (5.23).
We now collect the estimates for the right-hand side of (5.20). For $n = 1$, we use (5.21) in (5.20) and get
\[
\tfrac{1}{2} A_1(u_{h,1}^-, u_{h,1}^-) + \int_{I_1} \|\Delta_1 u_h\|_{\Omega_0}^2 \, dt + \tfrac{1}{2} A_1(u_{h,0}^+, u_{h,0}^+)
\leq \frac{1}{2\varepsilon t_1} \|u_0\|_{\Omega_0}^2 + \varepsilon C \int_{I_1} \|\Delta_1 u_h\|_{\Omega_0}^2 \, dt. \qquad (5.27)
\]
Using that $A_1(u_{h,0}^+, u_{h,0}^+) \geq 0$ from (4.11), moving the last term on the right-hand side over to the left-hand side, and multiplying both sides by $t_1$, (5.27) becomes
\[
\begin{aligned}
&\frac{t_1}{2} A_1(u_{h,1}^-, u_{h,1}^-) + (1 - \varepsilon C) t_1 \int_{I_1} \|\Delta_1 u_h\|_{\Omega_0}^2 \, dt \leq \frac{t_1}{2\varepsilon t_1} \|u_0\|_{\Omega_0}^2 = \frac{1}{2\varepsilon} \|u_0\|_{\Omega_0}^2, \\
&\quad \Longrightarrow \quad \frac{t_1}{2} A_1(u_{h,1}^-, u_{h,1}^-) + C t_1 \int_{I_1} \|\Delta_1 u_h\|_{\Omega_0}^2 \, dt \leq C \|u_0\|_{\Omega_0}^2, \qquad (5.28)
\end{aligned}
\]
where the last step follows from taking $\varepsilon$ sufficiently small. For $n = 2, \dots, N$, we insert (5.22) into (5.20), use (5.23) and (5.26), and thus get
\[
\begin{aligned}
&\tfrac{1}{2} A_n(u_{h,n}^-, u_{h,n}^-) + \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt + \tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+)
= (u_{h,n-1}^-, -\Delta_n u_{h,n-1}^+)_{\Omega_0}
\overset{(5.22)}{=} \mathrm{RHS.I} + \mathrm{RHS.II} \\
&\quad \overset{(5.23),(5.26)}{\leq} \frac{C}{\varepsilon_1} \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + \varepsilon_1 C \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt + \tfrac{1}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-) + \tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+) \\
&\qquad + \frac{C}{\varepsilon_2} \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + \varepsilon_2 C \int_{I_{n-1}} \|\Delta_{n-1} u_h\|_{\Omega_0}^2 \, dt. \qquad (5.29)
\end{aligned}
\]
Cancelling $\tfrac{1}{2} A_n(u_{h,n-1}^+, u_{h,n-1}^+)$ on both sides, moving the second term on the right-hand side over to the left-hand side, and multiplying both sides by $t_n$, (5.29) becomes
\[
\begin{aligned}
&\frac{t_n}{2} A_n(u_{h,n}^-, u_{h,n}^-) + (1 - \varepsilon_1 C) t_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt \\
&\quad \leq t_n \Big( \frac{C}{\varepsilon_1} + \frac{C}{\varepsilon_2} \Big) \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + t_n \varepsilon_2 C \int_{I_{n-1}} \|\Delta_{n-1} u_h\|_{\Omega_0}^2 \, dt + \frac{t_n}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-), \\
&\Longrightarrow \quad \frac{t_n}{2} A_n(u_{h,n}^-, u_{h,n}^-) + C t_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt - \frac{t_{n-1}}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-) \\
&\quad \leq C \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + t_n \varepsilon_2 C \int_{I_{n-1}} \|\Delta_{n-1} u_h\|_{\Omega_0}^2 \, dt + \frac{t_n}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-) - \frac{t_{n-1}}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-), \qquad (5.30)
\end{aligned}
\]
where the step after the arrow follows from taking $\varepsilon_1$ sufficiently small and subtracting $\frac{t_{n-1}}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-)$ on both sides. The difference in the last row of the right-hand side is
\[
\frac{t_n}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-) - \frac{t_{n-1}}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-)
= \frac{k_n}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-)
\leq C k_n |||u_{h,n-1}^-|||_{A_{n-1}}^2
\leq C k_{n-1} |||u_{h,n-1}^-|||_{A_{n-1}}^2
\leq C \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt, \qquad (5.31)
\]
which we combine with the first term on the right-hand side of (5.30). Summing (5.30) over $n = 2, \dots, N$ and adding (5.28), we obtain
\[
\sum_{n=1}^{N} \frac{t_n}{2} A_n(u_{h,n}^-, u_{h,n}^-) + C \sum_{n=1}^{N} t_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt - \sum_{n=2}^{N} \frac{t_{n-1}}{2} A_{n-1}(u_{h,n-1}^-, u_{h,n-1}^-)
\leq C \|u_0\|_{\Omega_0}^2 + C \sum_{n=2}^{N} \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + \sum_{n=2}^{N} t_n \varepsilon_2 C \int_{I_{n-1}} \|\Delta_{n-1} u_h\|_{\Omega_0}^2 \, dt. \qquad (5.32)
\]
The $t_n A_n$-terms in (5.32) form a telescoping sum where only the $t_N A_N$-term remains. Also using that $t_n \leq C t_{n-1}$ for $n = 2, \dots, N$, which follows from the quasi-uniformity of the temporal discretization, we thus have
\[
\begin{aligned}
&\frac{t_N}{2} A_N(u_{h,N}^-, u_{h,N}^-) + C \sum_{n=1}^{N} t_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt \\
&\quad \leq C \|u_0\|_{\Omega_0}^2 + C \sum_{n=2}^{N} \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt + \varepsilon_2 C \sum_{n=2}^{N} t_{n-1} \int_{I_{n-1}} \|\Delta_{n-1} u_h\|_{\Omega_0}^2 \, dt \\
&\quad \overset{(5.6)}{\leq} C \|u_0\|_{\Omega_0}^2 + C \|u_0\|_{\Omega_0}^2 + \varepsilon_2 C \sum_{n=1}^{N-1} t_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt. \qquad (5.33)
\end{aligned}
\]
Moving the last term on the right-hand side to the left-hand side and taking $\varepsilon_2$ sufficiently small gives us the following stability estimate for the discrete Laplacian of the finite element solution:
\[
\sum_{n=1}^{N} t_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt \leq C \|u_0\|_{\Omega_0}^2. \qquad (5.34)
\]
It is thus sufficient to estimate the time-derivative terms and the time jump terms on the left-hand side of (5.15) by the left-hand side of (5.34) to obtain (5.15). We proceed by showing an estimate for the time-derivative terms.
By taking $v = (t - t_{n-1}) \dot u_h$ in (5.5), we have
\[
\underbrace{\int_{I_n} (\dot u_h, (t - t_{n-1}) \dot u_h)_{\Omega_0} \, dt}_{=\,\mathrm{I}} + \underbrace{\int_{I_n} A_n(u_h, (t - t_{n-1}) \dot u_h) \, dt}_{=\,\mathrm{II}} + \underbrace{([u_h]_{n-1}, ((t - t_{n-1}) \dot u_h)_{n-1}^+)_{\Omega_0}}_{=\,\mathrm{III}} = 0. \qquad (5.35)
\]
We consider the terms separately, starting with the first:
\[
\mathrm{I} = \int_{I_n} (\dot u_h, (t - t_{n-1}) \dot u_h)_{\Omega_0} \, dt
= \int_{I_n} (t - t_{n-1}) (\dot u_h, \dot u_h)_{\Omega_0} \, dt
= \int_{I_n} (t - t_{n-1}) \|\dot u_h\|_{\Omega_0}^2 \, dt. \qquad (5.36)
\]
The second term in (5.35) is
\[
\mathrm{II} = \int_{I_n} A_n(u_h, (t - t_{n-1}) \dot u_h) \, dt
= \int_{I_n} (t - t_{n-1}) A_n(u_h, \dot u_h) \, dt
\overset{(4.25)}{=} \int_{I_n} (t - t_{n-1}) (-\Delta_n u_h, \dot u_h)_{\Omega_0} \, dt. \qquad (5.37)
\]
The third term in (5.35) is
\[
\mathrm{III} = ([u_h]_{n-1}, ((t - t_{n-1}) \dot u_h)_{n-1}^+)_{\Omega_0}
= ([u_h]_{n-1}, \underbrace{(t_{n-1} - t_{n-1})}_{=\,0} \dot u_{h,n-1}^+)_{\Omega_0} = 0. \qquad (5.38)
\]
Using the identities (5.36), (5.37), and (5.38) in (5.35), we may obtain
\[
\begin{aligned}
\int_{I_n} (t - t_{n-1}) \|\dot u_h\|_{\Omega_0}^2 \, dt
&= \int_{I_n} (t - t_{n-1}) (\Delta_n u_h, \dot u_h)_{\Omega_0} \, dt
\leq \int_{I_n} (t - t_{n-1}) \|\Delta_n u_h\|_{\Omega_0} \|\dot u_h\|_{\Omega_0} \, dt \\
&\leq \Big( \int_{I_n} (t - t_{n-1}) \|\Delta_n u_h\|_{\Omega_0}^2 \, dt \Big)^{1/2} \Big( \int_{I_n} (t - t_{n-1}) \|\dot u_h\|_{\Omega_0}^2 \, dt \Big)^{1/2}, \\
\Longrightarrow \quad \int_{I_n} (t - t_{n-1}) \|\dot u_h\|_{\Omega_0}^2 \, dt
&\leq \int_{I_n} (t - t_{n-1}) \|\Delta_n u_h\|_{\Omega_0}^2 \, dt. \qquad (5.39)
\end{aligned}
\]
By using an inverse estimate and (5.39), we have
\[
\int_{I_n} \|\dot u_h\|_{\Omega_0}^2 \, dt
\leq C k_n^{-1} \int_{I_n} (t - t_{n-1}) \|\dot u_h\|_{\Omega_0}^2 \, dt
\leq C k_n^{-1} \int_{I_n} (t - t_{n-1}) \|\Delta_n u_h\|_{\Omega_0}^2 \, dt
\leq C k_n^{-1} k_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt
= C \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \, dt. \qquad (5.40)
\]
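The first inequality in (5.40) is an inverse estimate in time. As a simple illustration (a special case, not the general argument): if $\dot u_h$ is constant in time on $I_n$, then
\[
\int_{I_n} (t - t_{n-1}) \|\dot u_h\|_{\Omega_0}^2 \, dt = \frac{k_n^2}{2} \|\dot u_h\|_{\Omega_0}^2 = \frac{k_n}{2} \int_{I_n} \|\dot u_h\|_{\Omega_0}^2 \, dt,
\]
so the estimate holds with $C = 2$; for $\dot u_h$ polynomial in time of fixed degree, the constant depends only on that degree.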
We proceed by showing an estimate for the time jump terms for $n = 2, \dots, N$. We would like to take $v = [u_h]_{n-1} = u_{h,n-1}^+ - u_{h,n-1}^-$ in (5.5), but due to the dG(0) movement of $\mathcal{T}_G$, $u_{h,n-1}^- \notin V_{h,n}$ as already pointed out, so we cannot make this choice of $v$. To handle this, we use the $L^2(\Omega_0)$-projection $P_n$. By taking $v = P_n [u_h]_{n-1}$ in (5.5), we have
\[
\underbrace{\int_{I_n} (\dot u_h, P_n[u_h]_{n-1})_{\Omega_0} \, dt}_{=\,\mathrm{I}} + \underbrace{\int_{I_n} A_n(u_h, P_n[u_h]_{n-1}) \, dt}_{=\,\mathrm{II}} + \underbrace{([u_h]_{n-1}, (P_n[u_h]_{n-1})_{n-1}^+)_{\Omega_0}}_{=\,\mathrm{III}} = 0. \qquad (5.41)
\]
We consider the terms separately, starting with the first:
\[
\mathrm{I} = \int_{I_n} (\dot u_h, P_n[u_h]_{n-1})_{\Omega_0} \, dt
\overset{(4.15)}{=} \int_{I_n} (\dot u_h, [u_h]_{n-1})_{\Omega_0} \, dt. \qquad (5.42)
\]
The second term in (5.41) is
\[
\mathrm{II} = \int_{I_n} A_n(u_h, P_n[u_h]_{n-1}) \, dt
\overset{(4.25)}{=} \int_{I_n} (-\Delta_n u_h, P_n[u_h]_{n-1})_{\Omega_0} \, dt
\overset{(4.15)}{=} \int_{I_n} (-\Delta_n u_h, [u_h]_{n-1})_{\Omega_0} \, dt. \qquad (5.43)
\]
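The steps marked (4.15) in (5.42) and (5.43) move the projection off the jump term; the mechanism, as we read it, is that $P_n$ is the $L^2(\Omega_0)$-orthogonal projection onto $V_{h,n}$ and both $\dot u_h(\cdot, t)$ and $\Delta_n u_h$ belong to $V_{h,n}$, so for any such $w \in V_{h,n}$,
\[
(w, P_n [u_h]_{n-1})_{\Omega_0} = (P_n w, [u_h]_{n-1})_{\Omega_0} = (w, [u_h]_{n-1})_{\Omega_0}.
\]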
The third term in (5.41) is
\[
\begin{aligned}
\mathrm{III} = ([u_h]_{n-1}, (P_n[u_h]_{n-1})_{n-1}^+)_{\Omega_0}
&= ([u_h]_{n-1}, P_n[u_h]_{n-1})_{\Omega_0}
= ([u_h]_{n-1}, P_n[u_h]_{n-1} \pm [u_h]_{n-1})_{\Omega_0} \\
&= ([u_h]_{n-1}, P_n[u_h]_{n-1} - [u_h]_{n-1})_{\Omega_0} + ([u_h]_{n-1}, [u_h]_{n-1})_{\Omega_0} \\
&= \|[u_h]_{n-1}\|_{\Omega_0}^2 - ([u_h]_{n-1}, (1 - P_n)[u_h]_{n-1})_{\Omega_0}. \qquad (5.44)
\end{aligned}
\]
Using the identities (5.42), (5.43), and (5.44) in (5.41), we may obtain
\[
\|[u_h]_{n-1}\|_{\Omega_0}^2
= \underbrace{([u_h]_{n-1}, (1 - P_n)[u_h]_{n-1})_{\Omega_0}}_{=\,\mathrm{RHS.I}}
+ \underbrace{\int_{I_n} (\Delta_n u_h - \dot u_h, [u_h]_{n-1})_{\Omega_0} \, dt}_{=\,\mathrm{RHS.II}}. \qquad (5.45)
\]
We treat the terms separately, starting with the first. For $n = 2, \dots, N$, the first term is
\[
\begin{aligned}
\mathrm{RHS.I} = ([u_h]_{n-1}, (1 - P_n)[u_h]_{n-1})_{\Omega_0}
&= ([u_h]_{n-1}, [u_h]_{n-1} - P_n[u_h]_{n-1})_{\Omega_0} \\
&= ([u_h]_{n-1},\, u_{h,n-1}^+ - u_{h,n-1}^- - \underbrace{P_n u_{h,n-1}^+}_{=\,u_{h,n-1}^+} + P_n u_{h,n-1}^-)_{\Omega_0} \\
&= ([u_h]_{n-1}, -(u_{h,n-1}^- - P_n u_{h,n-1}^-))_{\Omega_0}
\leq \|[u_h]_{n-1}\|_{\Omega_0} \|u_{h,n-1}^- - P_n u_{h,n-1}^-\|_{\Omega_0} \\
&\leq \|[u_h]_{n-1}\|_{\Omega_0} \|u_{h,n-1}^- - S_n u_{h,n-1}^-\|_{\Omega_0}
\overset{(4.49)}{\leq} \|[u_h]_{n-1}\|_{\Omega_0} \, C h \, |||u_{h,n-1}^-|||_{A_{n-1}} \\
&\leq \tfrac{1}{4} \|[u_h]_{n-1}\|_{\Omega_0}^2 + (Ch)^2 |||u_{h,n-1}^-|||_{A_{n-1}}^2
\overset{(3.1)}{\leq} \tfrac{1}{4} \|[u_h]_{n-1}\|_{\Omega_0}^2 + C k_n k_{n-1} |||u_{h,n-1}^-|||_{A_{n-1}}^2 \\
&\leq \tfrac{1}{4} \|[u_h]_{n-1}\|_{\Omega_0}^2 + C k_n \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \, dt. \qquad (5.46)
\end{aligned}
\]
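The unlabeled inequality $\|u_{h,n-1}^- - P_n u_{h,n-1}^-\|_{\Omega_0} \leq \|u_{h,n-1}^- - S_n u_{h,n-1}^-\|_{\Omega_0}$ in (5.46) is, as we read it, the best-approximation property of the $L^2(\Omega_0)$-projection:
\[
\|u_{h,n-1}^- - P_n u_{h,n-1}^-\|_{\Omega_0} = \min_{w \in V_{h,n}} \|u_{h,n-1}^- - w\|_{\Omega_0} \leq \|u_{h,n-1}^- - S_n u_{h,n-1}^-\|_{\Omega_0},
\]
since $S_n u_{h,n-1}^- \in V_{h,n}$ is one admissible competitor.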
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1) ≤ 1 4∥[uh]n−1∥2 Ω0 + Cknkn−1 ������u− h,n−1 ������2 An−1 ≤ 1 4∥[uh]n−1∥2 Ω0 + Ckn � In−1 |||uh|||2 An−1 dt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='46) The second term in (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='45) is RHS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='II = � In (∆nuh − ˙uh, [uh]n−1)Ω0 dt ≤ � In ∥∆nuh − ˙uh∥Ω0∥[uh]n−1∥Ω0 dt = ∥[uh]n−1∥Ω0 � In ∥∆nuh − ˙uh∥Ω0 dt ≤ ∥[uh]n−1∥Ω0 � � In 12 dt �1/2� � In ∥∆nuh − ˙uh∥2 Ω0 dt �1/2 = ∥[uh]n−1∥Ω0 � kn � In ∥∆nuh − ˙uh∥2 Ω0 dt �1/2 = 1 4∥[uh]n−1∥2 Ω0 + kn � In ∥∆nuh − ˙uh∥2 Ω0 dt ≤ 1 4∥[uh]n−1∥2 Ω0 + 2kn � In ∥∆nuh∥2 Ω0 + ∥ ˙uh∥2 Ω0 dt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='47) Using the estimates (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='46) and (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='47) in (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='45), we may obtain, for n = 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' , N, that 1 2∥[uh]n−1∥2 Ω0 ≤ Ckn � In−1 |||uh|||2 An−1 dt + 2kn � In ∥∆nuh∥2 Ω0 + ∥ ˙uh∥2 Ω0 dt, =⇒ 1 kn ∥[uh]n−1∥2 Ω0 ≤ C � In ∥ ˙uh∥2 Ω0 + ∥∆nuh∥2 Ω0 dt + C � In−1 |||uh|||2 An−1 dt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='48) Finally we have all the partial results that are needed to show the desired stability 33 estimate (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='15).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' We start with the left-hand side of (5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='15).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' N � n=1 tn � In ∥ ˙uh∥2 Ω0 + ∥∆nuh∥2 Ω0 dt + N � n=2 tn kn ∥[uh]n−1∥2 Ω0 (5.' 
\[
\begin{aligned}
&\sum_{n=1}^N t_n \int_{I_n} \|\dot u_h\|_{\Omega_0}^2 + \|\Delta_n u_h\|_{\Omega_0}^2 \,dt + \sum_{n=2}^N \frac{t_n}{k_n} \|[u_h]_{n-1}\|_{\Omega_0}^2 \\
&\quad\overset{(5.48)}{\le} \sum_{n=1}^N t_n \int_{I_n} \|\dot u_h\|_{\Omega_0}^2 + \|\Delta_n u_h\|_{\Omega_0}^2 \,dt
+ \sum_{n=2}^N t_n \Big( C \int_{I_n} \|\dot u_h\|_{\Omega_0}^2 + \|\Delta_n u_h\|_{\Omega_0}^2 \,dt + C \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \,dt \Big) \\
&\quad\le C \sum_{n=1}^N t_n \int_{I_n} \|\dot u_h\|_{\Omega_0}^2 + \|\Delta_n u_h\|_{\Omega_0}^2 \,dt + C \sum_{n=2}^N \int_{I_{n-1}} |||u_h|||_{A_{n-1}}^2 \,dt \\
&\quad\overset{(5.6)}{\le} C \sum_{n=1}^N t_n \Big( \int_{I_n} \|\dot u_h\|_{\Omega_0}^2 \,dt + \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \,dt \Big) + C\|u_0\|_{\Omega_0}^2 \\
&\quad\overset{(5.40)}{\le} C \sum_{n=1}^N t_n \Big( C \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \,dt + \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \,dt \Big) + C\|u_0\|_{\Omega_0}^2 \\
&\quad\le C \sum_{n=1}^N t_n \int_{I_n} \|\Delta_n u_h\|_{\Omega_0}^2 \,dt + C\|u_0\|_{\Omega_0}^2
\overset{(5.34)}{\le} C\|u_0\|_{\Omega_0}^2 + C\|u_0\|_{\Omega_0}^2 = C\|u_0\|_{\Omega_0}^2,
\end{aligned} \tag{5.49}
\]
which shows (5.15). This concludes the proof of Lemma 5.2.

5.3 Proof of Theorem 5.1 (The main stability estimate)

For the proof of Theorem 5.1, we will use some additional inequalities. For n = 1, ..., N and $a_n, b_n \ge 0$, we have
\[
\sum_{n=1}^N b_n^2 \ge \Big( \sum_{n=1}^N a_n b_n \Big)^2 \Big( \sum_{n=1}^N a_n^2 \Big)^{-1}, \tag{5.50}
\]
which comes from the Cauchy-Schwarz inequality. Noting that $k_1 = t_1$, since $t_0 = 0$, we have
\[
\sum_{n=1}^N \frac{k_n}{t_n} = 1 + \sum_{n=2}^N \frac{k_n}{t_n} \le 1 + \int_{t_1}^{t_N} \frac{1}{t} \,dt = 1 + \log(t_N/k_1). \tag{5.51}
\]
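Both auxiliary inequalities follow from elementary arguments; the following brief expansion (added here for readability, not from the original text) spells out the two steps. For (5.50), the Cauchy-Schwarz inequality gives
\[
\Big( \sum_{n=1}^N a_n b_n \Big)^2 \le \Big( \sum_{n=1}^N a_n^2 \Big) \Big( \sum_{n=1}^N b_n^2 \Big)
\quad\Longrightarrow\quad
\sum_{n=1}^N b_n^2 \ge \Big( \sum_{n=1}^N a_n b_n \Big)^2 \Big( \sum_{n=1}^N a_n^2 \Big)^{-1}.
\]
For (5.51), since $t \le t_n$ on $I_n$,
\[
\frac{k_n}{t_n} = \int_{I_n} \frac{1}{t_n} \,dt \le \int_{I_n} \frac{1}{t} \,dt
\quad\Longrightarrow\quad
\sum_{n=2}^N \frac{k_n}{t_n} \le \int_{t_1}^{t_N} \frac{1}{t} \,dt = \log(t_N/t_1) = \log(t_N/k_1).
\]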
We are now ready to prove Theorem 5.1.

Proof. The proof idea is to derive lower bounds for separate terms on the left-hand sides of the auxiliary stability estimates. These lower bounds will then be used to obtain the main stability estimate. From the basic stability estimate (5.6), we have
\[
C\|u_0\|_{\Omega_0}^2 \ge \|u^-_{h,N}\|_{\Omega_0}^2 + \|[u_h]_0\|_{\Omega_0}^2 \ge \tfrac{1}{2} \big( \|u^-_{h,N}\|_{\Omega_0} + \|[u_h]_0\|_{\Omega_0} \big)^2,
\quad\Longrightarrow\quad
\|u^-_{h,N}\|_{\Omega_0} + \|[u_h]_0\|_{\Omega_0} \le C\|u_0\|_{\Omega_0}. \tag{5.52}
\]
In the strong stability estimate (5.15), we may categorize the terms on the left-hand side as either integral terms or jump terms. Since the treatment will be the same for both the integral terms, we consider a generic integral term. We thus have from (5.15) that
\[
C\|u_0\|_{\Omega_0}^2 \ge \sum_{n=1}^N t_n \int_{I_n} \|w\|_{\Omega_0}^2 \,dt
= \sum_{n=1}^N \frac{t_n}{k_n} \Big( \int_{I_n} 1^2 \,dt \Big) \Big( \int_{I_n} \|w\|_{\Omega_0}^2 \,dt \Big)
\ge \sum_{n=1}^N \frac{t_n}{k_n} \Big( \int_{I_n} \|w\|_{\Omega_0} \,dt \Big)^2
\ge \Big( \sum_{n=1}^N \frac{k_n}{t_n} \Big)^{-1} \Big( \sum_{n=1}^N \int_{I_n} \|w\|_{\Omega_0} \,dt \Big)^2, \tag{5.53}
\]
where we have used the Cauchy-Schwarz inequality to obtain the second inequality. To obtain the last inequality, we have used (5.50) with $a_n = \sqrt{k_n/t_n}$ and $b_n = \sqrt{t_n/k_n} \int_{I_n} \|w\|_{\Omega_0} \,dt$. From (5.53), we have
\[
\sum_{n=1}^N \int_{I_n} \|w\|_{\Omega_0} \,dt \le C \Big( \sum_{n=1}^N \frac{k_n}{t_n} \Big)^{1/2} \|u_0\|_{\Omega_0} \le C (1 + \log(t_N/k_1))^{1/2} \|u_0\|_{\Omega_0}, \tag{5.54}
\]
where we have used (5.51) to obtain the last inequality. We move on to the jump terms. From (5.15), we have
\[
C\|u_0\|_{\Omega_0}^2 \ge \sum_{n=2}^N \frac{t_n}{k_n} \|[u_h]_{n-1}\|_{\Omega_0}^2 \ge \Big( \sum_{n=2}^N \frac{k_n}{t_n} \Big)^{-1} \Big( \sum_{n=2}^N \|[u_h]_{n-1}\|_{\Omega_0} \Big)^2, \tag{5.55}
\]
where we have used (5.50) with $a_n = \sqrt{k_n/t_n}$ and $b_n = \sqrt{t_n/k_n}\, \|[u_h]_{n-1}\|_{\Omega_0}$, to obtain the last inequality. From (5.55), we have
\[
\sum_{n=2}^N \|[u_h]_{n-1}\|_{\Omega_0} \le C \Big( \sum_{n=2}^N \frac{k_n}{t_n} \Big)^{1/2} \|u_0\|_{\Omega_0} \le C (1 + \log(t_N/k_1))^{1/2} \|u_0\|_{\Omega_0}, \tag{5.56}
\]
where we have used (5.51) to obtain the last inequality. By adding (5.52), (5.54) for both integral terms in (5.15), and (5.56), and noting that $\log(t_N/k_1) \ge 0$, we may obtain the main stability estimate (5.1). This concludes the proof of Theorem 5.1.
6 A priori error analysis

To prove an a priori error estimate, we follow the methodology presented by Eriksson and Johnson in [12, 13] and make only minor modifications to account for the CutFEM setting.

Theorem 6.1 (An optimal order a priori error estimate in $\|\cdot\|_{\Omega_0}$ at the final time). Let $u$ be the solution of (2.5) and let $u_h$ be the finite element solution defined by (3.17). Then, for q = 0, 1, we have that
\[
\|u(t_N) - u^-_{h,N}\|_{\Omega_0} \le C_N \max_{1 \le n \le N} \Big( k_n^{2q+1} \|\dot u^{(2q+1)}\|_{\Omega_0,I_n} + h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n} \Big), \tag{6.1}
\]
where $\|\cdot\|_{\Omega_0} = \|\cdot\|_{L^2(\Omega_0)}$, $C_N = C(\log(t_N/k_N) + 1)^{1/2}$, where $C > 0$ is a constant, $k_n = t_n - t_{n-1}$, $\|w\|_{\Omega_0,I_n} = \max_{t \in I_n} \|w\|_{\Omega_0}$, $\dot u^{(2q+1)} = \partial^{2q+1} u / \partial t^{2q+1}$, $h$ is the largest diameter of a simplex in $\mathcal{T}_0 \cup \mathcal{T}_G$, and $D_x$ denotes the derivative with respect to space.

Proof. Let $e = u - u_h$ denote the approximation error. We start by splitting the error using the interpolant $\tilde u = \tilde I_n R_n u \in V_h$, where $\tilde I_n$ is the temporal interpolation operator defined by (B.13), and $R_n$ is the Ritz projection operator defined by (4.16),
\[
e = u - u_h \underbrace{\pm \tilde u}_{=0} = \underbrace{(u - \tilde u)}_{=\rho} + \underbrace{(\tilde u - u_h)}_{=\theta} = \rho + \theta. \tag{6.2}
\]
We then consider
\[
\|u(t_N) - u^-_{h,N}\|_{\Omega_0} = \|e^-_N\|_{\Omega_0} = \|(\rho + \theta)^-_N\|_{\Omega_0}
\le \underbrace{\|\rho^-_N\|_{\Omega_0}}_{\text{the } \rho\text{-part}} + \underbrace{\|\theta^-_N\|_{\Omega_0}}_{\text{the } \theta\text{-part}}, \tag{6.3}
\]
where we treat the ρ-part and the θ-part separately.

Estimation of the ρ-part

Here, we consider the term in (6.3) involving ρ. First we note that since $\rho = u - \tilde u = u - \tilde I_n R_n u$, we have for n = 1, ..., N,
\[
\|\rho^-_n\|_{\Omega_0} = \|(u - \tilde I_n R_n u)^-_n\|_{\Omega_0} = \|u^-_n - (\tilde I_n R_n u)^-_n\|_{\Omega_0}
\overset{(B.13a)}{=} \|u^-_n - (R_n u)^-_n\|_{\Omega_0} = \|u^-_n - R_n u^-_n\|_{\Omega_0} \le \|u - R_n u\|_{\Omega_0,I_n}, \tag{6.4}
\]
where $\|w\|_{\Omega_0,I_n} = \max_{t \in I_n} \|w\|_{\Omega_0} = \max_{t \in I_n} \|w(\cdot, t)\|_{\Omega_0}$. The ρ-part in (6.3) is thus
\[
\|\rho^-_N\|_{\Omega_0} \overset{(6.4)}{\le} \|u - R_n u\|_{\Omega_0,I_N} \le \max_{1 \le n \le N} \big( \|u - R_n u\|_{\Omega_0,I_n} \big). \tag{6.5}
\]

Estimation of the θ-part

Here, we consider the term in (6.3) involving θ. We first note that from the Galerkin orthogonality (4.65), we have
\[
B_h(\theta, z_h) = -B_h(\rho, z_h), \tag{6.6}
\]
where we have used $e = \rho + \theta$ and chosen $v = z_h$. Since $\theta = \tilde u - u_h \in V_h$ is a permissible test function for the discrete dual problem (4.66), we may take $v = \theta$ in (4.66) and choose $z^+_{h,N} = \theta^-_N$ to obtain
\[
B_h(\theta, z_h) = \|\theta^-_N\|_{\Omega_0}^2. \tag{6.7}
\]
Combining (6.6) and (6.7), and using Lemma 4.6, we obtain the error representation
\[
\|\theta^-_N\|_{\Omega_0}^2 = -B_h(\rho, z_h)
= \underbrace{\sum_{n=1}^N \int_{I_n} (\rho, \dot z_h)_{\Omega_0} \,dt}_{=I}
+ \underbrace{\sum_{n=1}^N - \int_{I_n} A_{h,t}(\rho, z_h) \,dt}_{=II}
+ \underbrace{\sum_{n=1}^{N-1} (\rho^-_n, [z_h]_n)_{\Omega_0}}_{=III}
+ \underbrace{-(\rho^-_N, z^-_{h,N})_{\Omega_0}}_{=IV}. \tag{6.8}
\]
We consider the terms on the right-hand side of (6.8) separately, starting with the first sum. We note that for q = 0, the first term vanishes, since for every $x \in \Omega_0$, $z_h(x, \cdot)|_{I_n} \in P_0(I_n)$, which means that $\dot z_h(x, \cdot)|_{I_n} = 0$. For n = 1, ..., N, we thus have
\[
I \Big|_{q=0} = \int_{I_n} (\rho, \dot z_h)_{\Omega_0} \,dt = 0, \tag{6.9}
\]
and
\[
\begin{aligned}
I \Big|_{q \ge 1} &= \int_{I_n} (\rho, \dot z_h)_{\Omega_0} \,dt
= \int_{I_n} \int_{\Omega_0} (u - \tilde I_n R_n u) \dot z_h \,dx \,dt
= \int_{\Omega_0} \Big( \int_{I_n} u \dot z_h \,dt - \int_{I_n} \tilde I_n R_n u \underbrace{\dot z_h}_{\in V_h^{n,q-1}} \,dt \Big) dx \\
&\overset{(B.13b)}{=} \int_{\Omega_0} \Big( \int_{I_n} u \dot z_h \,dt - \int_{I_n} R_n u \, \dot z_h \,dt \Big) dx
= \int_{I_n} \int_{\Omega_0} (u - R_n u) \dot z_h \,dx \,dt
= \int_{I_n} (u - R_n u, \dot z_h)_{\Omega_0} \,dt \\
&\le \|u - R_n u\|_{\Omega_0,I_n} \int_{I_n} \|\dot z_h\|_{\Omega_0} \,dt.
\end{aligned} \tag{6.10}
\]
For n = 1, ..., N, the terms in the second sum on the right-hand side of (6.8) are
\[
\begin{aligned}
II &= -\int_{I_n} A_{h,t}(\rho, z_h) \,dt
= -\int_{I_n} A_n(u - \tilde u, z_h) \,dt
= -\int_{I_n} A_n(u, z_h) - A_n(\tilde u, z_h) \,dt \\
&\overset{(4.16)}{=} -\int_{I_n} A_n(R_n u, z_h) - A_n(\tilde u, z_h) \,dt
= \int_{I_n} -A_n(R_n u - \tilde u, z_h) \,dt
\overset{(4.25)}{=} \int_{I_n} (R_n u - \tilde u, \Delta_n z_h)_{\Omega_0} \,dt.
\end{aligned} \tag{6.11}
\]
The subsequent treatment of II is different for q = 0 and q ≥ 1. For q = 0, we continue by writing
\[
II \Big|_{q=0} = \int_{I_n} (R_n u - \tilde u, \Delta_n z_h)_{\Omega_0} \,dt
\le \int_{I_n} \|R_n u - \tilde u\|_{\Omega_0} \|\Delta_n z_h\|_{\Omega_0} \,dt
\le \|R_n u - \tilde u\|_{\Omega_0,I_n} \int_{I_n} \|\Delta_n z_h\|_{\Omega_0} \,dt. \tag{6.12}
\]
For q ≥ 1, we may instead continue by writing
\[
\begin{aligned}
II \Big|_{q \ge 1} &= \int_{I_n} (R_n u - \tilde u, \Delta_n z_h)_{\Omega_0} \,dt
= \int_{I_n} \Big( R_n u - \tilde u, \Delta_n \Big\{ z^-_{h,n} + \int_{t_n}^{t} \dot z_h \,ds \Big\} \Big)_{\Omega_0} \,dt \\
&= \underbrace{\int_{I_n} (R_n u - \tilde u, \Delta_n z^-_{h,n})_{\Omega_0} \,dt}_{=II.1}
+ \underbrace{\int_{I_n} \Big( R_n u - \tilde u, \Delta_n \Big\{ \int_{t_n}^{t} \dot z_h \,ds \Big\} \Big)_{\Omega_0} \,dt}_{=II.2}.
\end{aligned} \tag{6.13}
\]
We consider II.1 and II.2 separately, starting with the former:
\[
\begin{aligned}
II.1 &= \int_{I_n} (R_n u - \tilde u, \Delta_n z^-_{h,n})_{\Omega_0} \,dt
= \int_{I_n} \int_{\Omega_0} (R_n u - \tilde I_n R_n u) \Delta_n z^-_{h,n} \,dx \,dt
= \int_{\Omega_0} \Big( \int_{I_n} R_n u \, \Delta_n z^-_{h,n} \,dt - \int_{I_n} \tilde I_n R_n u \underbrace{\Delta_n z^-_{h,n}}_{\in V_h^{n,q-1}} \,dt \Big) dx \\
&\overset{(B.13b)}{=} \int_{\Omega_0} \underbrace{\Big( \int_{I_n} R_n u \, \Delta_n z^-_{h,n} \,dt - \int_{I_n} R_n u \, \Delta_n z^-_{h,n} \,dt \Big)}_{=0} dx = 0.
\end{aligned} \tag{6.14}
\]
For q = 1, we may treat II.2 in the following way:
\[
\begin{aligned}
II.2 \Big|_{q=1} &= \int_{I_n} \Big( R_n u - \tilde u, \Delta_n \Big\{ \int_{t_n}^{t} \dot z_h \,ds \Big\} \Big)_{\Omega_0} \,dt
\overset{q=1}{=} \int_{I_n} (R_n u - \tilde u, \Delta_n \{ (t - t_n) \dot z_h \})_{\Omega_0} \,dt \\
&\overset{(4.25)}{=} \int_{I_n} -A_n(R_n u - \tilde u, (t - t_n) \dot z_h) \,dt
\overset{(4.25)}{=} \int_{I_n} (\Delta_n \{ R_n u - \tilde u \}, (t - t_n) \dot z_h)_{\Omega_0} \,dt \\
&\le \int_{I_n} |t - t_n| \, \|\Delta_n \{ R_n u - \tilde u \}\|_{\Omega_0} \|\dot z_h\|_{\Omega_0} \,dt
\le k_n \|\Delta_n \{ R_n u - \tilde u \}\|_{\Omega_0,I_n} \int_{I_n} \|\dot z_h\|_{\Omega_0} \,dt.
\end{aligned} \tag{6.15}
\]
For n = 1, ..., N − 1, the terms in the third sum on the right-hand side of (6.8) are
\[
III = (\rho^-_n, [z_h]_n)_{\Omega_0} \le \|\rho^-_n\|_{\Omega_0} \|[z_h]_n\|_{\Omega_0} \overset{(6.4)}{\le} \|u - R_n u\|_{\Omega_0,I_n} \|[z_h]_n\|_{\Omega_0}. \tag{6.16}
\]
The fourth term on the right-hand side of (6.8) is treated in the exact same way:
\[
IV = -(\rho^-_N, z^-_{h,N})_{\Omega_0} \le \|\rho^-_N\|_{\Omega_0} \|z^-_{h,N}\|_{\Omega_0} \overset{(6.4)}{\le} \|u - R_n u\|_{\Omega_0,I_N} \|z^-_{h,N}\|_{\Omega_0}. \tag{6.17}
\]
Summing up what we have for q = 0, i.e.
inserting (6.9), (6.12), (6.16), and (6.17) into (6.8), we obtain
\[
\begin{aligned}
\|\theta^-_N\|_{\Omega_0}^2 \Big|_{q=0} &\le \sum_{n=1}^N \|R_n u - \tilde u\|_{\Omega_0,I_n} \int_{I_n} \|\Delta_n z_h\|_{\Omega_0} \,dt
+ \sum_{n=1}^{N-1} \|u - R_n u\|_{\Omega_0,I_n} \|[z_h]_n\|_{\Omega_0}
+ \|u - R_n u\|_{\Omega_0,I_N} \|z^-_{h,N}\|_{\Omega_0} \\
&\le \max_{1 \le n \le N} \Big( \|u - R_n u\|_{\Omega_0,I_n} + \|R_n u - \tilde u\|_{\Omega_0,I_n} \Big)
\times \Big( \sum_{n=1}^N \int_{I_n} \|\Delta_n z_h\|_{\Omega_0} \,dt + \sum_{n=1}^{N-1} \|[z_h]_n\|_{\Omega_0} + \|z^-_{h,N}\|_{\Omega_0} \Big) \\
&\le C_N F_0(u) \|\theta^-_N\|_{\Omega_0},
\end{aligned} \tag{6.18}
\]
where $F_0(u)$ is the factor with the max-function. To obtain the second inequality, we have taken the maximum over 1 ≤ n ≤ N of all the left-hand factors in every term on the left-hand side. To obtain the last inequality, we have used the stability estimate (5.4) with $z^+_{h,N} = \theta^-_N$. Analogously, summing up what we have for q = 1, i.e. inserting (6.10), (6.13), (6.16), and (6.17) into (6.8), where we have inserted (6.14) and (6.15) into (6.13), we obtain
\[
\begin{aligned}
\|\theta^-_N\|_{\Omega_0}^2 \Big|_{q=1} &\le \sum_{n=1}^N \|u - R_n u\|_{\Omega_0,I_n} \int_{I_n} \|\dot z_h\|_{\Omega_0} \,dt
+ \sum_{n=1}^N k_n \|\Delta_n \{ R_n u - \tilde u \}\|_{\Omega_0,I_n} \int_{I_n} \|\dot z_h\|_{\Omega_0} \,dt \\
&\quad + \sum_{n=1}^{N-1} \|u - R_n u\|_{\Omega_0,I_n} \|[z_h]_n\|_{\Omega_0}
+ \|u - R_n u\|_{\Omega_0,I_N} \|z^-_{h,N}\|_{\Omega_0} \\
&\le \max_{1 \le n \le N} \Big( \|u - R_n u\|_{\Omega_0,I_n} + k_n \|\Delta_n \{ R_n u - \tilde u \}\|_{\Omega_0,I_n} \Big)
\times \Big( 2 \sum_{n=1}^N \int_{I_n} \|\dot z_h\|_{\Omega_0} \,dt + \sum_{n=1}^{N-1} \|[z_h]_n\|_{\Omega_0} + \|z^-_{h,N}\|_{\Omega_0} \Big) \\
&\le C_N F_1(u) \|\theta^-_N\|_{\Omega_0},
\end{aligned} \tag{6.19}
\]
where $F_1(u)$ is the factor with the max-function. To obtain the second inequality, we have taken the maximum over 1 ≤ n ≤ N of all the left-hand factors in every term on the left-hand side. To obtain the last inequality, we have used the stability estimate (5.4) with $z^+_{h,N} = \theta^-_N$. Dividing both sides in (6.18) and (6.19) by $\|\theta^-_N\|_{\Omega_0}$, the estimation of the θ-part for q = 0, 1, finally becomes
\[
\|\theta^-_N\|_{\Omega_0} \le C_N F_q(u). \tag{6.20}
\]

Estimation of $F_q(u)$

Now we need an estimate for $F_q(u)$. From (6.18) and (6.19), we note that we may write $F_q(u)$ for q = 0, 1, as
\[
F_q(u) = \max_{1 \le n \le N} \Big( \underbrace{\|u - R_n u\|_{\Omega_0,I_n}}_{=I}
+ (1 - q) \underbrace{\|R_n u - \tilde u\|_{\Omega_0,I_n}}_{=II}
+ q k_n \underbrace{\|\Delta_n \{ R_n u - \tilde u \}\|_{\Omega_0,I_n}}_{=III} \Big). \tag{6.21}
\]
We treat the terms separately, starting with the first, for which we use Lemma 4.2:
\[
I = \|u - R_n u\|_{\Omega_0,I_n} \overset{(4.18)}{\le} C_I h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n}, \tag{6.22}
\]
where $C_I > 0$ is a constant. The second term on the right-hand side of (6.21) is
\[
\begin{aligned}
II &= \|R_n u - \tilde u\|_{\Omega_0,I_n}
= \|R_n u - \tilde I_n R_n u + \tilde I_n u - \tilde I_n u + u - u\|_{\Omega_0,I_n} \\
&\le \|u - R_n u\|_{\Omega_0,I_n} + \|\tilde I_n(u - R_n u)\|_{\Omega_0,I_n} + \|u - \tilde I_n u\|_{\Omega_0,I_n}
\le C_{II} \Big( h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n} + k_n^{q+1} \|\dot u^{(q+1)}\|_{\Omega_0,I_n} \Big),
\end{aligned} \tag{6.23}
\]
where $C_{II} > 0$ is a constant. We have used (6.22) on the first term in the second row of (6.23). On the second term, we have first used the boundedness of $\tilde I_n$ from Lemma B.3, and then applied (6.22). On the last term in the second row of (6.23), we have used (B.14) from Lemma B.3. We move on to the third term in (6.21). Note that this term is only present for q = 1. To treat it we will use the following: For $\psi \in H^2(\Omega_0)$ and $v \in V_{h,n}$ we have that
\[
(-\Delta_n R_n \psi, v)_{\Omega_0} \overset{(4.25)}{=} A_n(R_n \psi, v) \overset{(4.16)}{=} A_n(\psi, v) \overset{(A.12)}{=} (-\Delta \psi, v)_{\Omega_0}
\;\Longrightarrow\;
\|\Delta_n R_n \psi\|_{\Omega_0}^2 = (-\Delta \psi, -\Delta_n R_n \psi) \le \|\Delta \psi\|_{\Omega_0} \|\Delta_n R_n \psi\|_{\Omega_0}, \tag{6.24}
\]
where we have used the definitions of $\Delta_n$ and $R_n$, and Corollary A.1, i.e., partial integration in broken Sobolev spaces with bilinear forms A. Dividing both sides by a factor $\|\Delta_n R_n \psi\|_{\Omega_0}$ gives
\[
\|\Delta_n R_n \psi\|_{\Omega_0} \le \|\Delta \psi\|_{\Omega_0}. \tag{6.25}
\]
The third term in (6.21) is
\[
III = \|\Delta_n \{ R_n u - \tilde u \}\|_{\Omega_0,I_n} = \|\Delta_n \{ R_n u - \tilde I_n R_n u \}\|_{\Omega_0,I_n}
\overset{(B.14)}{\le} C k_n^2 \|\Delta_n \{ \partial_t^2 R_n u \}\|_{\Omega_0,I_n} = C k_n^2 \|\Delta_n R_n \dot u^{(2)}\|_{\Omega_0,I_n}
\overset{(6.25)}{\le} C k_n^2 \|\Delta \dot u^{(2)}\|_{\Omega_0,I_n} = C_{III} k_n^2 \|\dot u^{(3)}\|_{\Omega_0,I_n}, \tag{6.26}
\]
where $C_{III} > 0$ is a constant. With the insertion of (6.22), (6.23), and (6.26) in (6.21), we get for q = 0, 1
\[
\begin{aligned}
F_q(u) &\le \max_{1 \le n \le N} \Big( C_I h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n}
+ (1 - q) C_{II} \big( h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n} + k_n^{q+1} \|\dot u^{(q+1)}\|_{\Omega_0,I_n} \big)
+ q k_n C_{III} k_n^2 \|\dot u^{(3)}\|_{\Omega_0,I_n} \Big) \\
&= \max_{1 \le n \le N} \Big( (C_I + (1 - q) C_{II}) h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n}
+ (1 - q) C_{II} k_n^{q+1} \|\dot u^{(q+1)}\|_{\Omega_0,I_n}
+ q C_{III} k_n^3 \|\dot u^{(3)}\|_{\Omega_0,I_n} \Big) \\
&\le C \max_{1 \le n \le N} \Big( h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n} + k_n^{2q+1} \|\dot u^{(2q+1)}\|_{\Omega_0,I_n} \Big),
\end{aligned} \tag{6.27}
\]
where $C > 0$ is a constant.
The final step

To obtain the desired error estimate, we insert the estimations of the ρ-part (6.5) and the θ-part (6.20) in (6.3) to obtain
\[
\begin{aligned}
\|u(t_N) - u^-_{h,N}\|_{\Omega_0} &\le \|\rho^-_N\|_{\Omega_0} + \|\theta^-_N\|_{\Omega_0}
\overset{(6.5),(6.20)}{\le} \underbrace{\max_{1 \le n \le N} \big( \|u - R_n u\|_{\Omega_0,I_n} \big)}_{\le F_q(u)} + C_N F_q(u)
\le (1 + C_N) F_q(u) \le C_N F_q(u) \\
&\overset{(6.27)}{\le} C_N \max_{1 \le n \le N} \Big( k_n^{2q+1} \|\dot u^{(2q+1)}\|_{\Omega_0,I_n} + h^{p+1} \|D_x^{p+1} u\|_{\Omega_0,I_n} \Big),
\end{aligned} \tag{6.28}
\]
where we have used the estimation of $F_q(u)$, given by (6.27). This concludes the proof of Theorem 6.1.

7 Numerical results

Here we present numerical results for the implementation of (3.17) for the following model problem in one spatial dimension:
\[
\begin{cases}
\dot u - u_{xx} = f & \text{in } (0, 1) \times (0, 3], \\
u = 0 & \text{on } \{0, 1\} \times (0, 3], \\
u = \sin^2(\pi x) & \text{in } (0, 1) \times \{0\},
\end{cases} \tag{7.1a}
\]
where
\[
f(x, t) = -\Big( \tfrac{1}{2} \sin^2(\pi x) + 2\pi^2 \cos(2\pi x) \Big) e^{-t/2}. \tag{7.1b}
\]
The exact solution of (7.1) is
\[
u = \sin^2(\pi x) \, e^{-t/2}. \tag{7.2}
\]
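As a quick sanity check, one may verify symbolically that (7.2) solves (7.1); the following minimal sketch, which is not part of the original paper, uses sympy for this purpose.

```python
import sympy as sp

x, t = sp.symbols("x t")

# Exact solution (7.2) and right-hand side (7.1b) of the model problem.
u = sp.sin(sp.pi * x)**2 * sp.exp(-t / 2)
f = -(sp.Rational(1, 2) * sp.sin(sp.pi * x)**2
      + 2 * sp.pi**2 * sp.cos(2 * sp.pi * x)) * sp.exp(-t / 2)

# Residual of the heat equation (7.1a); it should simplify to zero.
residual = sp.diff(u, t) - sp.diff(u, x, 2) - f
print(sp.simplify(residual))                              # 0

# Boundary and initial conditions of (7.1a).
print(u.subs(x, 0), u.subs(x, 1))                         # 0 0
print(sp.simplify(u.subs(t, 0) - sp.sin(sp.pi * x)**2))   # 0
```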
To obtain the finite element solution $u_h$, we have used piecewise linear basis functions in space, and in time we have used the discontinuous Galerkin methods dG(0) and dG(1). In other words, we have used the finite element method defined by (3.17) with p = 1 and q = 0, 1. The right-hand side integrals involving f have been approximated locally by quadrature over the space-time prisms: first quadrature in time, then quadrature in space. In space, three-point Gauss-Legendre quadrature has been used, thus resulting in a quadrature error of the sixth order, i.e., quadrature error ∼ h⁶. For dG(0) in time, the midpoint rule has been used, thus resulting in a quadrature error of the second order, i.e., quadrature error ∼ k². For dG(1) in time, three-point Lobatto quadrature has been used, thus resulting in a quadrature error of the fourth order, i.e., quadrature error ∼ k⁴. The velocity µ of the overlapping mesh has been 0 on every subinterval $I_n = (t_{n-1}, t_n]$, in accordance with dG(0) mesh movement. The stabilization parameter has been γ = 10 in all simulations used to obtain the numerical results presented in this section.
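For reference, the three quadrature rules mentioned above have the following standard nodes and weights on the reference interval [−1, 1]; this is an illustrative sketch, and the helper name apply_rule is introduced here rather than taken from the paper.

```python
import numpy as np

# Standard quadrature rules on the reference interval [-1, 1].
# Midpoint rule (used for dG(0) in time): exact for degree 1, error ~ k^2.
midpoint = (np.array([0.0]), np.array([2.0]))

# Three-point Gauss-Legendre (used in space): exact for degree 5, error ~ h^6.
gauss_legendre_3 = (np.array([-np.sqrt(3.0 / 5.0), 0.0, np.sqrt(3.0 / 5.0)]),
                    np.array([5.0 / 9.0, 8.0 / 9.0, 5.0 / 9.0]))

# Three-point Gauss-Lobatto (used for dG(1) in time): exact for degree 3, error ~ k^4.
lobatto_3 = (np.array([-1.0, 0.0, 1.0]),
             np.array([1.0 / 3.0, 4.0 / 3.0, 1.0 / 3.0]))

def apply_rule(rule, g, a, b):
    """Approximate the integral of g over [a, b] with a reference-interval rule."""
    nodes, weights = rule
    mid, half = 0.5 * (a + b), 0.5 * (b - a)
    return half * np.sum(weights * g(mid + half * nodes))

# Example: integrate the initial datum sin^2(pi x) over one small spatial element.
print(apply_rule(gauss_legendre_3, lambda s: np.sin(np.pi * s)**2, 0.0, 0.05))
```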
7.1 Illustrative examples

The solution is presented for two different pairs of equidistant space-time discretizations, where G is immersed in $\Omega_0$ for all $t \in [0, 3]$, and the length of G is 0.25. First, we consider the coarse case: (22 + 7) × 10, i.e., 22 nodes for $\mathcal{T}_0$, 7 nodes for $\mathcal{T}_G$, and 10 time steps on the interval (0, 3]. Second, we consider the fine case: (44 + 14) × 30, i.e., 44 nodes for $\mathcal{T}_0$, 14 nodes for $\mathcal{T}_G$, and 30 time steps on the interval (0, 3]. We present the solution for these two cases for three different velocities µ of the overlapping mesh $\mathcal{T}_G$: $\mu = 0$, $\mu = 0.1$, and $\mu = \tfrac{1}{2} \sin(\tfrac{2\pi t}{3})$. We consider these six cases in Figures 5–7 below.
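For the equidistant discretizations above, and assuming that the stated node counts include both end nodes of each mesh, the corresponding element sizes and time steps are as follows (the symbols $h_0$, $h_G$, and $k$ are introduced here only for orientation and are not the paper's notation for these particular quantities):
\[
\text{coarse: } h_0 = \tfrac{1}{21} \approx 0.048,\quad h_G = \tfrac{0.25}{6} \approx 0.042,\quad k = \tfrac{3}{10} = 0.3;
\qquad
\text{fine: } h_0 = \tfrac{1}{43} \approx 0.023,\quad h_G = \tfrac{0.25}{13} \approx 0.019,\quad k = \tfrac{3}{30} = 0.1.
\]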
Figure 5: The coarse case (left) and the fine case (right) for $\mu = 0$. The background mesh $\mathcal{T}_0$ is blue and its nodes are marked with small blue circles. The overlapping mesh $\mathcal{T}_G$ is red and its nodes are marked with small red crosses. The space-time boundary $\bar\Gamma_n$ between the two meshes is black.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 3 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 3 Figure 6: The coarse case (left) and the fine case (right) for µ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' The background mesh T0 is blue and its nodes are marked with small blue circles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' The overlapping mesh TG is red and its nodes are marked with small red crosses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' The space-time boundary ¯Γn between the two meshes is black.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 44 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 3 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 3 Figure 7: The coarse case (left) and the fine case (right) for µ = 1 2 sin( 2πt 3 ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' The background mesh T0 is blue and its nodes are marked with small blue circles.' 
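The three prescribed velocities determine how TG translates across the background mesh over (0, 3]. A minimal Python sketch of the corresponding mesh trajectory is given below; it is not part of the original text, it assumes the overlapping mesh translates rigidly, and the initial left endpoint x0 and the length of TG are placeholder values, with µ(t) integrated by the trapezoidal rule.

```python
import numpy as np

# Minimal sketch (not from the paper): rigid trajectory of the overlapping mesh
# T_G for a prescribed velocity mu(t). The initial left endpoint x0 and the
# length of T_G are placeholder parameters.

def tg_trajectory(mu, x0, length, T=3.0, num_steps=10):
    """Left/right endpoints of T_G at each discrete time level, obtained by
    integrating mu(t) over (0, T] with the composite trapezoidal rule."""
    t = np.linspace(0.0, T, num_steps + 1)
    v = np.array([mu(s) for s in t])
    dt = np.diff(t)
    displacement = np.concatenate(([0.0], np.cumsum(0.5 * (v[:-1] + v[1:]) * dt)))
    left = x0 + displacement
    return t, left, left + length

velocities = {
    "mu = 0": lambda t: 0.0,
    "mu = 0.1": lambda t: 0.1,
    "mu = (1/2) sin(2*pi*t/3)": lambda t: 0.5 * np.sin(2.0 * np.pi * t / 3.0),
}

for name, mu in velocities.items():
    # coarse case: 10 time steps on (0, 3]; x0 and length are assumed values
    t, xl, xr = tg_trajectory(mu, x0=0.4, length=0.25, T=3.0, num_steps=10)
    print(f"{name}: T_G at t = 3 occupies [{xl[-1]:.3f}, {xr[-1]:.3f}]")
```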
7.2 Convergence study

The error is the L2(Ω0)-norm of the difference between the exact and the finite element solution at the final time, i.e., $\|e(T)\|_{L^2(\Omega_0)} = \|u(T) - u_{h,N}^-\|_{\Omega_0}$. The integral in the L2-norm has been approximated by composite three-point Gauss–Legendre quadrature, thus resulting in a quadrature error of third order, i.e., quadrature error $\sim (h^6)^{1/2} = h^3$. We present results displaying the error's dependence on both the time step k and the mesh size h, separately, for different constant values of µ. Besides the computed error, each error convergence plot contains a line segment that has been computed with the linear least squares method to fit the error data. This line segment is referred to as the LLS of the error. The slope of the LLS of the error is given in the caption beneath each error convergence figure. Reference slopes are also included.
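A minimal sketch of the quadrature just described follows; it assumes u_exact and u_h are callables evaluating the exact and the finite element solution at the final time on Ω0 = (a, b), and is illustrative rather than the paper's implementation.

```python
import numpy as np

# Minimal sketch (assumptions: u_exact and u_h are callables evaluating the exact
# and the finite element solution at the final time T on Omega_0 = (a, b)).

def l2_error_gauss3(u_exact, u_h, a=0.0, b=1.0, num_cells=100):
    """Composite three-point Gauss-Legendre approximation of the L2(a, b) norm
    of u_exact - u_h. The three-point rule integrates polynomials up to degree 5
    exactly, consistent with the h^3 quadrature error for the norm quoted above."""
    nodes, weights = np.polynomial.legendre.leggauss(3)  # reference rule on [-1, 1]
    edges = np.linspace(a, b, num_cells + 1)
    total = 0.0
    for xl, xr in zip(edges[:-1], edges[1:]):
        mid, half = 0.5 * (xl + xr), 0.5 * (xr - xl)
        x = mid + half * nodes                           # mapped quadrature points
        diff = u_exact(x) - u_h(x)
        total += half * np.dot(weights, diff ** 2)
    return np.sqrt(total)

# Example with placeholder functions standing in for u(T) and the discrete solution:
print(l2_error_gauss3(lambda x: np.sin(np.pi * x),
                      lambda x: np.sin(np.pi * x) - 1e-3 * x * (1.0 - x)))
```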
Both T0 and TG are uniform meshes, with mesh sizes h0 and hG, respectively. The temporal discretization is also uniform, with time step k for each instance. Furthermore, the final time is set to T = 1, the length of the overlapping mesh TG is 0.25, and the initial position of TG is the spatial interval [0.125, 0.125 + 0.25]. In the plots with the error versus k, the mesh sizes h = h0 = hG have been fixed at a sufficiently small value so that the error's dependence on h is negligible in comparison with its dependence on k, and vice versa in the plots with the error versus h = h0 ≥ hG. The fixed values for the mesh size and the time step have been obtained by trial and error.
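The layout of such a study can be sketched as follows. This is purely illustrative: solve_and_measure_error is a hypothetical placeholder for the actual solver, and the fixed values h = 10⁻³ and k = 10⁻⁴ mirror those reported for dG(0) in Section 7.3.2 below.

```python
# Purely illustrative sketch of the study layout; solve_and_measure_error is a
# hypothetical placeholder for a solver returning ||e(T)||_{L2(Omega_0)}.

def solve_and_measure_error(h, k):
    raise NotImplementedError("placeholder for the actual finite element solver")

def temporal_study(k_values, h_fixed=1e-3):
    """Refine k while h = h0 = hG stays fixed at a small value."""
    return [(k, solve_and_measure_error(h_fixed, k)) for k in k_values]

def spatial_study(h_values, k_fixed=1e-4):
    """Refine h = h0 >= hG while the time step k stays fixed at a small value."""
    return [(h, solve_and_measure_error(h, k_fixed)) for h in h_values]
```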
7.3 dG(0) in time

7.3.1 Illustrative examples

Figures 8–13 display the dG(0) finite element solution uh for the six different cases (Figures 5–7) from two different angles.

Figure 8: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (2D view).

Figure 9: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (3D view).

Figure 10: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0.1 (2D view).

Figure 11: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0.1 (3D view).

Figure 12: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (2D view).
Figure 13: The dG(0) finite element solution uh for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (3D view).

7.3.2 Convergence study

Figure 14 and Figure 15 display two error convergence plots each. The left plots show the error versus k, and the right plots show the error versus h = h0 ≥ hG. The velocity is µ = 0 in Figure 14 and µ = 0.6 in Figure 15. In the plots displaying the error versus k, the mesh sizes have been fixed at h = h0 = hG = 10⁻³. Analogously, in the plots with the error versus h, the time step has been fixed at k = 10⁻⁴.

Figure 14: Error convergence for dG(0) when µ = 0. Left: The error versus k. The slope of the LLS of the error is 1.0064. Right: The error versus h. The slope of the LLS of the error is 2.0559.
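The LLS slope quoted in these captions is a linear least-squares fit of log(error) against log(k) or log(h), whose slope estimates the observed convergence order. A minimal sketch with synthetic data follows; the paper's raw error values are not reproduced here.

```python
import numpy as np

# Minimal sketch with synthetic data (not the paper's error values): the
# "LLS of the error" is a linear least-squares fit in log-log scale.

def lls_slope(step_sizes, errors):
    slope, _intercept = np.polyfit(np.log(step_sizes), np.log(errors), 1)
    return slope

k = np.array([0.5, 0.25, 0.125, 0.0625])
err_k = 2e-3 * k ** 1.0            # synthetic first-order behaviour in k
h = np.array([0.1, 0.05, 0.025, 0.0125])
err_h = 5e-4 * h ** 2.0            # synthetic second-order behaviour in h

print("slope vs k:", round(lls_slope(k, err_k), 4))  # ~1, cf. 1.0064 in Table 2
print("slope vs h:", round(lls_slope(h, err_h), 4))  # ~2, cf. ~2.05 in Table 2
```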
Figure 15: Error convergence for dG(0) when µ = 0.6. Left: The error versus k. The slope of the LLS of the error is 1.0064. Right: The error versus h. The slope of the LLS of the error is 2.0501.

The slopes of the LLS of the error for different values of the velocity µ are presented in Table 2.

Slope of the LLS of the error
µ      error versus k (points used)    error versus h (points used)
0      1.0064 (1–15)                   2.0559 (1–11)
0.1    1.0064 (1–15)                   2.0486 (1–11)
0.2    1.0064 (1–15)                   2.0421 (1–11)
0.4    1.0064 (1–15)                   2.0422 (1–11)
0.6    1.0064 (1–15)                   2.0501 (1–11)

Table 2: The slope of the LLS of the error versus k and h for different values of µ for dG(0).

7.4 dG(1) in time

7.4.1 Illustrative examples

Figures 16–21 display the dG(1) finite element solution uh for the six different cases (Figures 5–7) from two different angles.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 3 Figure 16: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (2D view).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 1 2 3 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 1 2 3 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 Figure 17: The dG(1) finite element solution uh for the coarse case (left) and the fine case (right) for µ = 0 (3D view).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 51 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 3 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9 1 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='5 2 2.' 
[Figure 18 omitted in this text extraction; only the caption is recoverable.]
Figure 18: The dG(1) finite element solution u_h for the coarse case (left) and the fine case (right) for µ = 0.1 (2D view).
[Figure 19 omitted in this text extraction; only the caption is recoverable.]
Figure 19: The dG(1) finite element solution u_h for the coarse case (left) and the fine case (right) for µ = 0.1 (3D view).
[Figure 20 omitted in this text extraction; only the caption is recoverable.]
Figure 20: The dG(1) finite element solution u_h for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (2D view).
[Figure 21 omitted in this text extraction; only the caption is recoverable.]
Figure 21: The dG(1) finite element solution u_h for the coarse case (left) and the fine case (right) for µ = (1/2) sin(2πt/3) (3D view).

7.4.2 Convergence study

Figure 22 and Figure 23 display two error convergence plots each. The left plots show the error versus k, and the right plots show the error versus h = h_0 ≥ h_G. The velocity is µ = 0 in Figure 22 and µ = 0.6 in Figure 23. In the plots displaying the error versus k, the mesh sizes have been fixed at h = h_0 = h_G = 5 · 10^{-5}. Analogously, in the plots with the error versus h, the time step has been fixed at k = 10^{-3}.

[Figure 22 omitted in this text extraction; only the caption is recoverable.]
Figure 22: Error convergence for dG(1) when µ = 0. Left: The error versus k. The slope of the LLS of the error is 2.7890. Right: The error versus h. The slope of the LLS of the error is 2.0122.
[Figure 23 omitted in this text extraction; only the caption is recoverable.]
Figure 23: Error convergence for dG(1) when µ = 0.6. Left: The error versus k. The slope of the LLS of the error is 2.8437. Right: The error versus h. The slope of the LLS of the error is 2.0082.

The slopes of the LLS of the error for different values of the velocity µ are presented in Table 3.

    Slope of the LLS of the error
    µ      error versus k (points used)    error versus h (points used)
    0      2.7890 (9–12)                   2.0122 (1–15)
    0.1    2.9142 (9–12)                   2.0058 (1–15)
    0.2    2.8493 (9–12)                   2.0024 (1–15)
    0.4    2.6994 (9–12)                   2.0024 (1–15)
    0.6    2.8437 (9–12)                   2.0082 (1–15)

Table 3: The slope of the LLS of the error versus k and h for different values of µ for dG(1).
7.5 Comparison with analytic results

From the a priori error estimate in Theorem 6.1 we have that the error has the following dependence on the time step k and mesh size h:

    ∥e(T)∥_{L²(Ω_0)} ∼ k^{2q+1} + h^{p+1}.    (7.3)

Thus with p = 1, Theorem 6.1 says that

    ∥e(T)∥_{L²(Ω_0)} ∼ k^1 + h²,  with dG(0) in time,    (7.4)
    ∥e(T)∥_{L²(Ω_0)} ∼ k^3 + h²,  with dG(1) in time.    (7.5)

The slopes of the LLS of the numerical error presented in Tables 2 and 3 thus verify the analytic error convergence orders from Theorem 6.1.
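As a small aside on how slopes like those in Table 3 are typically computed, the sketch below fits a least-squares line to log–log (step size, error) data with NumPy. The numbers in it are invented for illustration only; they are not the data behind Figures 22–23 or Table 3.

```python
import numpy as np

# Hypothetical (k, error) samples on a refinement sequence; NOT the data behind Table 3.
k = np.array([1e-1, 5e-2, 2.5e-2, 1.25e-2])
err = np.array([2.1e-4, 2.7e-5, 3.4e-6, 4.3e-7])

# Least-squares line through (log k, log err): its slope estimates the exponent r
# in err ~ C * k^r, i.e. the observed convergence order.
slope, _ = np.polyfit(np.log(k), np.log(err), 1)
print(f"estimated convergence order in k: {slope:.3f}")
```

The slopes versus h can be obtained in the same way from (h, error) pairs.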
8 Conclusions

We have presented a cut finite element method for a parabolic model problem on an overlapping mesh situation: one stationary background mesh and one discontinuously moving, slabwise stationary overlapping mesh. We have applied the analysis framework presented in [12, 13] to the method with natural modifications to account for the CutFEM setting. The greatest difference and novelty in the presented analysis is the shift operator. The main results of the analysis are basic and strong stability estimates and an optimal order a priori error estimate. We have also presented numerical results for a parabolic problem in one spatial dimension that verify the analytic error convergence orders.

A Analytic tools

Lemma A.1 (A jump identity). Let ω+, ω− ∈ R with ω+ + ω− = 1, let [A] := A+ − A−, and ⟨A⟩ := ω+A+ + ω−A−. We then have

    [AB] = [A]⟨B⟩ + ⟨A⟩[B] + (ω− − ω+)[A][B].    (A.1)

Proof. The three terms on the right-hand side of (A.1) are

    [A]⟨B⟩ = (A+ − A−)(ω+B+ + ω−B−) = ω+A+B+ + ω−A+B− − ω+A−B+ − ω−A−B−,    (A.2)
    ⟨A⟩[B] = (ω+A+ + ω−A−)(B+ − B−) = ω+A+B+ − ω+A+B− + ω−A−B+ − ω−A−B−,    (A.3)
    (ω− − ω+)[A][B] = (ω− − ω+)(A+ − A−)(B+ − B−) = (ω− − ω+)(A+B+ − A+B− − A−B+ + A−B−).    (A.4)

Adding these three expressions gives

    [A]⟨B⟩ + ⟨A⟩[B] + (ω− − ω+)[A][B]
        = ω+A+B+ + ω−A+B− − ω+A−B+ − ω−A−B−
        + ω+A+B+ − ω+A+B− + ω−A−B+ − ω−A−B−
        + ω−A+B+ − ω−A+B− − ω−A−B+ + ω−A−B−
        − ω+A+B+ + ω+A+B− + ω+A−B+ − ω+A−B−,    (A.5)

which after cancellation of most of the terms yields

    (ω+ + ω−)A+B+ − (ω+ + ω−)A−B− = A+B+ − A−B− = [AB].    (A.6)
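Since (A.1) is a purely algebraic identity, it can be spot-checked numerically. The sketch below evaluates both sides for arbitrary sample values; the chosen weight ω+ = 0.3 and the random draws are illustrative assumptions only.

```python
import numpy as np

rng = np.random.default_rng(0)
w_plus = 0.3
w_minus = 1.0 - w_plus                        # the weights must satisfy w+ + w- = 1

A_plus, A_minus, B_plus, B_minus = rng.normal(size=4)

jump = lambda p, m: p - m                     # [X] := X+ - X-
mean = lambda p, m: w_plus * p + w_minus * m  # <X> := w+ X+ + w- X-

lhs = jump(A_plus * B_plus, A_minus * B_minus)
rhs = (jump(A_plus, A_minus) * mean(B_plus, B_minus)
       + mean(A_plus, A_minus) * jump(B_plus, B_minus)
       + (w_minus - w_plus) * jump(A_plus, A_minus) * jump(B_plus, B_minus))
print(np.isclose(lhs, rhs))                   # expected output: True
```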
Lemma A.2 (Partial integration in broken Sobolev spaces). For d = 1, 2, or 3, let Ω ⊂ R^d be a bounded domain and let Γ ⊂ Ω be a continuous manifold of codimension 1 that partitions Ω into the subdomains Ω_1, ..., Ω_N. For ψ ∈ H²(Ω) and v ∈ H¹_0(Ω_1, ..., Ω_N), we have that

    (−∆ψ, v)_Ω = Σ_{i=1}^{N} (∇ψ, ∇v)_{Ω_i} − (⟨∂_n ψ⟩, [v])_Γ.    (A.7)

Proof. Using the partition of Ω and Green's first identity, the left-hand side of (A.7) is

    (−∆ψ, v)_Ω = Σ_{i=1}^{N} (−∆ψ, v)_{Ω_i} = Σ_{i=1}^{N} ( (∇ψ, ∇v)_{Ω_i} − ((∂_n ψ)_i, v_i)_{∂Ω_i} ).    (A.8)

Letting γ ⊂ Γ denote the common boundary between two subdomains, and noting that the γ's form a partition of Γ, the sum of the boundary terms is

    Σ_{i=1}^{N} ((∂_n ψ)_i, v_i)_{∂Ω_i}
        = Σ_{i=1}^{N} ( ((∂_n ψ)_i, v_i)_{∂Ω_i ∩ ∂Ω} + ((∂_n ψ)_i, v_i)_{∂Ω_i ∩ Γ} )    (the first term vanishes since v|_{∂Ω} = 0)
        = Σ_γ ( ((∂_n ψ)+, v+)_γ + ((∂_n ψ)−, v−)_γ )
        = ∫_Γ n+ · (∇ψ v)+ + n− · (∇ψ v)− ds
        = ∫_Γ n · (∇ψ v)+ − n · (∇ψ v)− ds
        = ∫_Γ [∂_n ψ v] ds
        = ∫_Γ [∂_n ψ]⟨v⟩ + ⟨∂_n ψ⟩[v] + (ω− − ω+)[∂_n ψ][v] ds
        = (⟨∂_n ψ⟩, [v])_Γ.    (A.9)

In the penultimate equality we have used Lemma A.1 and that [∂_n ψ]|_Γ = 0 in L²(Γ), which follows from the regularity of ψ. This shows (A.7).

Consider the domain partition and its corresponding broken Sobolev space presented in the premise of Lemma A.2. We define the symmetric bilinear form A that generalizes the appearance of A_{h,t}, defined by (4.2), to this setting by
    A(w, v) := Σ_{i=1}^{N} (∇w, ∇v)_{Ω_i} − (⟨∂_n w⟩, [v])_Γ − (⟨∂_n v⟩, [w])_Γ + (γ h_K^{-1} [w], [v])_Γ + ([∇w], [∇v])_{Ω_O},    (A.10)

where we just let h_K^{-1} be some spatially dependent function of sufficient regularity and Ω_O be some union of subsets of subdomains. The specifics of h_K^{-1} and Ω_O are of course taken to be the natural ones when restricting A to A_{h,t}. By introducing A to Lemma A.2, we get

    (−∆ψ, v)_Ω = Σ_{i=1}^{N} (∇ψ, ∇v)_{Ω_i} − (⟨∂_n ψ⟩, [v])_Γ    (by (A.7))
               = Σ_{i=1}^{N} (∇ψ, ∇v)_{Ω_i} − (⟨∂_n ψ⟩, [v])_Γ − (⟨∂_n v⟩, [ψ])_Γ + (γ h_K^{-1} [ψ], [v])_Γ + ([∇ψ], [∇v])_{Ω_O}    (since [ψ]|_Γ = 0 and [∇ψ]|_{Ω_O} = 0)
               = A(ψ, v),    (by (A.10))    (A.11)

where [ψ]|_Γ = 0 follows from Sobolev's inequality, i.e., ψ ∈ C(Ω) for d = 1, 2, 3, and [∇ψ]|_{Ω_O} = 0 since (∇ψ)+ = (∇ψ)− on Ω_O for a non-discrete function such as ψ. We present this result as the following corollary:

Corollary A.1 (Partial integration in broken Sobolev spaces with bilinear forms A). For d = 1, 2, or 3, let Ω ⊂ R^d be a bounded domain and let Γ ⊂ Ω be a continuous manifold of codimension 1 that partitions Ω into the subdomains Ω_1, ..., Ω_N. For this setting, let the symmetric bilinear form A be defined by (A.10). For ψ ∈ H²(Ω) and v ∈ H¹_0(Ω_1, ..., Ω_N), we have that

    (−∆ψ, v)_Ω = A(ψ, v).    (A.12)
Lemma A.3 (A scaled trace inequality for domain partitioning manifolds of codimension 1). For d = 1, 2, or 3, let Ω ⊂ R^d be a bounded domain with diameter L, i.e., L = diam(Ω) = sup_{x,y∈Ω} |x − y|. Let Γ ⊂ Ω be a continuous manifold of codimension 1 that partitions Ω into N subdomains. Then there exists a constant C > 0 such that

    ∥v∥²_Γ ≤ C ( L^{-1} ∥v∥²_Ω + L ∥∇v∥²_Ω ),    ∀v ∈ H¹(Ω).    (A.13)

Proof. If (A.13) holds for the case N = 2, then that result may be applied repeatedly to show (A.13) for N > 2. We thus assume that Γ partitions Ω into two subdomains denoted Ω_1 and Ω_2 with diameters L_1 and L_2, respectively. From the regularity assumptions on v, we have for i = 1, 2, that v ∈ H¹(Ω_i) and thus

    ∥v∥²_Γ ≤ ∥v∥²_{∂Ω_i} ≤ C_i ( L_i^{-1} ∥v∥²_{Ω_i} + L_i ∥∇v∥²_{Ω_i} ),    (A.14)

where the first inequality follows from Γ ⊂ ∂Ω_i, and the second is a standard scaled trace inequality. Now consider the sum L_1/L + L_2/L. We have that

    1 ≤ L_1/L + L_2/L ≤ 2,    (A.15)

where the lower bound follows from the triangle type inequality L ≤ L_1 + L_2, and the upper bound simply follows from L_i ≤ L.
We are now ready to show the desired inequality. The left-hand side of (A.13) is

    ∥v∥²_Γ = (1) ∥v∥²_Γ
           ≤ ( L_1/L + L_2/L ) ∥v∥²_Γ    (by (A.15))
           = (L_1/L) ∥v∥²_Γ + (L_2/L) ∥v∥²_Γ
           ≤ (L_1/L) C_1 ( L_1^{-1} ∥v∥²_{Ω_1} + L_1 ∥∇v∥²_{Ω_1} ) + (L_2/L) C_2 ( L_2^{-1} ∥v∥²_{Ω_2} + L_2 ∥∇v∥²_{Ω_2} )    (by (A.14))
           ≤ (C_1/L) ∥v∥²_{Ω_1} + C_1 L ∥∇v∥²_{Ω_1} + (C_2/L) ∥v∥²_{Ω_2} + C_2 L ∥∇v∥²_{Ω_2}
           ≤ max_i{C_i} L^{-1} ( ∥v∥²_{Ω_1} + ∥v∥²_{Ω_2} ) + max_i{C_i} L ( ∥∇v∥²_{Ω_1} + ∥∇v∥²_{Ω_2} )
           = C ( L^{-1} ∥v∥²_Ω + L ∥∇v∥²_Ω ),    (A.16)

where we have used that L_i ≤ L to obtain the third inequality. This shows (A.13).
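For intuition, the estimate (A.13) can be sampled in a one-dimensional toy setting where Ω = (0, L) and Γ is a single interior point, so ∥v∥²_Γ is just v(x₀)². The sketch below computes the ratio of the left- and right-hand sides (with the constant C dropped) for a few arbitrary test functions and domain sizes; it only illustrates that the ratio stays bounded for these samples and is in no way a proof.

```python
import numpy as np

# 1D toy sampling of the scaled trace inequality (A.13): Omega = (0, L), Gamma = {x0}.
# Test functions, x0 and the values of L are arbitrary illustrative choices.
tests = [lambda x, L: np.sin(2 * np.pi * x / L), lambda x, L: np.exp(x / L) * (L - x)]
for L in (0.5, 1.0, 4.0):
    x = np.linspace(0.0, L, 20001)
    x0 = 0.3 * L
    for v in tests:
        vx = v(x, L)
        dvx = np.gradient(vx, x)
        lhs = np.interp(x0, x, vx) ** 2               # ||v||_Gamma^2 = v(x0)^2
        rhs = np.mean(vx**2) * L / L + L * np.mean(dvx**2) * L   # Riemann sums for the integrals
        print(f"L = {L:3.1f}  v(x0)^2 / (L^-1 ||v||^2 + L ||v'||^2) = {lhs / rhs:.3f}")
```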
Let Γ_K = Γ_K(t) = K ∩ Γ(t). For t ∈ [0, T], j ∈ {0, G}, a simplex K ∈ T_{j,Γ(t)} = {K ∈ T_j : K ∩ Γ(t) ≠ ∅}, and v ∈ H¹(K), we have from Lemma A.3 that

    ∥v∥²_{Γ_K} ≤ C ( h_K^{-1} ∥v∥²_K + h_K ∥∇v∥²_K ),    (A.17)

where h_K is the diameter of K. For v ∈ P(K), i.e., a polynomial on K, we have the standard inverse estimate

    ∥D_x^k v∥²_K ≤ C h_K^{-2} ∥D_x^{k−1} v∥²_K,    for k ≥ 1.    (A.18)

For v ∈ V_h(t), we thus have

    ∥D_x^k v∥²_{Γ_K} ≤ C ( h_K^{-1} ∥D_x^k v∥²_K + h_K ∥∇D_x^k v∥²_K )    (by (A.17))
                     ≤ C ( h_K^{-1} ∥D_x^k v∥²_K + h_K C h_K^{-2} ∥D_x^k v∥²_K )    (by (A.18))
                     = C h_K^{-1} ∥D_x^k v∥²_K,    (A.19)

which we present as the following corollary:

Corollary A.2 (A discrete spatial local inverse inequality for Γ_K(t)). For t ∈ [0, T], j ∈ {0, G}, K ∈ T_{j,Γ(t)} with diameter h_K, let Γ_K(t) = K ∩ Γ(t). Then, for k ≥ 0, there exists a constant C > 0 such that

    ∥D_x^k v∥²_{Γ_K(t)} ≤ C h_K^{-1} ∥D_x^k v∥²_K,    ∀v ∈ V_h(t).    (A.20)
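The h-scaling in the inverse estimate (A.18) can also be seen by rescaling one fixed polynomial to elements of shrinking size: the quantity h²∥p′∥²_K / ∥p∥²_K is then independent of h. The quadratic used in the sketch below is an arbitrary choice made only for illustration.

```python
import numpy as np

# 1D illustration of the inverse estimate (A.18): for a polynomial p on K = (0, h),
# ||p'||_K^2 <= C h^{-2} ||p||_K^2. Rescaling a fixed reference polynomial to smaller
# elements leaves h^2 * ||p'||^2 / ||p||^2 unchanged, which is exactly the h^{-2} scaling.
ref_coeffs = [0.3, -0.7, 1.0]                 # p_ref(s) = 0.3 s^2 - 0.7 s + 1 (arbitrary)
for h in (1.0, 0.1, 0.01):
    x = np.linspace(0.0, h, 10001)
    p = np.polyval(ref_coeffs, x / h)         # p(x) = p_ref(x / h) on the element (0, h)
    dp = np.gradient(p, x)
    # Means stand in for the integrals; the common 1/h factor cancels in the ratio.
    ratio = h**2 * np.mean(dp**2) / np.mean(p**2)
    print(f"h = {h:5.2f}   h^2 * ||p'||^2 / ||p||^2 = {ratio:.4f}")
```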
Lemma A.4 (A discrete spatial inverse inequality for Γ(t)). Let the mesh-dependent norm ∥·∥_{−1/2,h,Γ(t)} be defined by (4.7). Then, for t ∈ [0, T], there exists a constant C_I > 0, independent of h, such that

    ∥⟨∂_{n̄_x} v⟩∥²_{−1/2,h,Γ(t)} ≤ C_I ( Σ_{i=1}^{2} ∥∇v∥²_{Ω_i(t)} + ∥[∇v]∥²_{Ω_O(t)} ),    ∀v ∈ V_h(t).    (A.21)

Proof. To lighten the notation we omit the time dependence, which is of no importance here anyway. We follow the proof of the corresponding inequality in [2] with some modifications. We use the index j ∈ {0, G}, such that, if j = 0, then i = 1, and if j = G, then i = 2, and let Γ_{K_j} = K_j ∩ Γ and T_{j,Γ} = {K_j ∈ T_j : K_j ∩ Γ ≠ ∅}. Note that for i = 1, 2,

    Σ_{K_0 ∈ T_{0,Γ}} h_{K_0} ∥v_i∥²_{Γ_{K_0}} ≤ h Σ_{K_0 ∈ T_{0,Γ}} ∥v_i∥²_{Γ_{K_0}} = h Σ_{K_G ∈ T_{G,Γ}} ∥v_i∥²_{Γ_{K_G}} ≤ C Σ_{K_G ∈ T_{G,Γ}} h_{K_G} ∥v_i∥²_{Γ_{K_G}},    (A.22)

where we have used that ∪_{K_0 ∈ T_{0,Γ}} Γ_{K_0} = Γ = ∪_{K_G ∈ T_{G,Γ}} Γ_{K_G} to obtain the identity, and the quasi-uniformity of T_0 and T_G to obtain the last inequality. Using the norm definition and recalling that ⟨v⟩ = ω_1 v_1 + ω_2 v_2, the left-hand side of (A.21) is

    ∥⟨∂_{n̄_x} v⟩∥²_{−1/2,h,Γ} = Σ_{K_0 ∈ T_{0,Γ}} h_{K_0} ∥⟨∂_{n̄_x} v⟩∥²_{Γ_{K_0}}
        ≤ Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0} ∥ω_1 (∂_{n̄_x} v)_1∥²_{Γ_{K_0}} + Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0} ∥ω_2 (∂_{n̄_x} v)_2∥²_{Γ_{K_0}}
        ≤ Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0} ∥ω_1 (∂_{n̄_x} v)_1∥²_{Γ_{K_0}} + C Σ_{K_G ∈ T_{G,Γ}} 2 h_{K_G} ∥ω_2 (∂_{n̄_x} v)_2∥²_{Γ_{K_G}}.    (by (A.22))    (A.23)

Since ∂_{n̄_x} v = n̄_x · ∇v, we have

    ∥ω_i (∂_{n̄_x} v)_i∥²_{Γ_{K_j}} ≤ ∫_{Γ_{K_j}} |ω_i|² |n̄_x|² |(∇v)_i|² ds ≤ ∥(∇v)_i∥²_{Γ_{K_j}} ≤ C h_{K_j}^{-1} ∥(∇v)_i∥²_{K_j},    (using |ω_i|²|n̄_x|² ≤ 1 and (A.20))    (A.24)

Using (A.24) in (A.23), we get

    ∥⟨∂_{n̄_x} v⟩∥²_{−1/2,h,Γ} ≤ Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0} C h_{K_0}^{-1} ∥∇v∥²_{K_0} + C Σ_{K_G ∈ T_{G,Γ}} 2 h_{K_G} C h_{K_G}^{-1} ∥∇v∥²_{K_G}
        ≤ C Σ_{K_0 ∈ T_{0,Γ}} ∥∇v∥²_{K_0} + C Σ_{K_G ∈ T_{G,Γ}} ∥∇v∥²_{K_G}
        = C Σ_{K_0 ∈ T_{0,Γ}} ( ∥∇v∥²_{K_0 ∩ Ω_1} + ∥(∇v)_1∥²_{K_0 ∩ Ω_2} ) + C Σ_{K_G ∈ T_{G,Γ}} ∥∇v∥²_{K_G}
        ≤ C ∥∇v∥²_{Ω_1} + C ∥(∇v)_1∥²_{Ω_O} + C ∥∇v∥²_{Ω_2}
        ≤ C Σ_{i=1}^{2} ∥∇v∥²_{Ω_i} + C ∥(∇v)_1 ± (∇v)_2∥²_{Ω_O}
        ≤ C Σ_{i=1}^{2} ∥∇v∥²_{Ω_i} + C ( ∥[∇v]∥²_{Ω_O} + ∥(∇v)_2∥²_{Ω_O} )
        ≤ C_I ( Σ_{i=1}^{2} ∥∇v∥²_{Ω_i} + ∥[∇v]∥²_{Ω_O} ),    (A.25)

which is the desired estimate.

B Interpolation

B.1 Spatial interpolation operator

For the definition of the spatial interpolation operator, we recall the semi-discrete spaces V_{h,0} and V_{h,G}, defined by (3.7) and (3.8), respectively.
We define the spatial interpolation operators π_{h,0} : L¹(Ω_0) → V_{h,0} and π_{h,G} : L¹(G) → V_{h,G} to be the Scott-Zhang interpolation operators for the spaces V_{h,0} and V_{h,G}, respectively, where the defining integrals are taken over entire simplices. We point out that π_{h,G} = π_{h,G}(t), i.e., it is time-dependent, since G is allowed to move around, but we omit the t to lighten the notation. For t ∈ [0, T], we define the spatial interpolation operator I_{h,t} : L¹(Ω_0) → V_h(t) by

    I_{h,t} v|_{Ω_1(t)} := π_{h,0} v|_{Ω_1(t)},    I_{h,t} v|_{Ω_2(t)} := π_{h,G} v|_{Ω_2(t)}.    (B.1)

The operator I_{h,t} is used in the proofs of Lemma 4.2 and Lemma 4.4, where energy estimates of its interpolation error are used. We present and prove these estimates in two lemmas below; a small illustrative sketch of the composite construction (B.1) comes first.
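As a rough illustration of (B.1), the 1D sketch below interpolates a function on a background mesh over Ω_0 = (0, 1) and on an overlapping mesh over a subinterval G, and then glues the two interpolants by domain. Plain nodal (piecewise-linear) interpolation is used as a simple stand-in for the Scott-Zhang operators, and the meshes and the function v are arbitrary illustrative choices.

```python
import numpy as np

# 1D cartoon of the composite interpolant (B.1). Nodal piecewise-linear interpolation
# stands in for the Scott-Zhang operators pi_{h,0} and pi_{h,G}; this is a simplification.
v = lambda x: np.sin(2.0 * np.pi * x)

bg_nodes = np.linspace(0.0, 1.0, 11)     # background mesh on Omega_0 = (0, 1)
g_nodes = np.linspace(0.35, 0.65, 7)     # overlapping mesh on G = (0.35, 0.65)

def I_h(x):
    """Composite interpolant: G-mesh interpolant on Omega_2 = G, background elsewhere."""
    on_G = (x >= g_nodes[0]) & (x <= g_nodes[-1])
    vals_bg = np.interp(x, bg_nodes, v(bg_nodes))
    vals_G = np.interp(x, g_nodes, v(g_nodes))
    return np.where(on_G, vals_G, vals_bg)

x = np.linspace(0.0, 1.0, 2001)
print("max pointwise interpolation error:", np.abs(I_h(x) - v(x)).max())
```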
Lemma B.1 (An interpolation error estimate in |||·|||_{A_{h,t}}). Let |||·|||_{A_{h,t}} and I_{h,t} be defined by (4.10) and (B.1), respectively. Then there exists a constant C > 0 such that

    |||v − I_{h,t} v|||_{A_{h,t}} ≤ C h^p ∥D_x^{p+1} v∥_{Ω_0},    ∀v ∈ H^{p+1}(Ω_0).    (B.2)

Proof. To lighten the notation we omit the time dependence, which is of no importance here anyway. Letting w = v − I_{h,t} v, and using the definition of |||·|||_{A_{h,t}}, the square of the left-hand side of (B.2) is

    |||w|||²_{A_{h,t}} = Σ_{i=1}^{2} ∥∇w∥²_{Ω_i}  (= I)  +  ∥⟨∂_n w⟩∥²_{−1/2,h,Γ}  (= II)  +  ∥[w]∥²_{1/2,h,Γ}  (= III)  +  ∥[∇w]∥²_{Ω_O}  (= IV).    (B.3)

Letting w_j = v − π_{h,j} v, we treat each term in (B.3) separately, starting with the first:

    I = ∥∇w∥²_{Ω_i} ≤ Σ_{K ∈ T_{j,Ω_i}} ∥∇w_j∥²_K,    (B.4)

where we have expanded the spatial integration domain by going from Ω_i to all simplices in T_j that are cut by Ω_i. The second term is

    II = ∥⟨∂_n w⟩∥²_{−1/2,h,Γ} = Σ_{K_0 ∈ T_{0,Γ}} h_{K_0} ∥⟨∂_n w⟩∥²_{Γ_{K_0}}
       ≤ Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0} ∥ω_1 (∂_n w)_1∥²_{Γ_{K_0}} + Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0} ∥ω_2 (∂_n w)_2∥²_{Γ_{K_0}}
       ≤ Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0} ∥ω_1 (∂_n w)_1∥²_{Γ_{K_0}} + C Σ_{K_G ∈ T_{G,Γ}} 2 h_{K_G} ∥ω_2 (∂_n w)_2∥²_{Γ_{K_G}}    (by (A.22))
       ≤ C Σ_{i=1}^{2} Σ_{K_j ∈ T_{j,Γ}} h_{K_j} ∥ω_i (∂_n w)_i∥²_{Γ_{K_j}}
       ≤ C Σ_{i=1}^{2} Σ_{K_j ∈ T_{j,Γ}} h_{K_j} ∥(∇w)_i∥²_{Γ_{K_j}}    (by (A.24))
       ≤ C Σ_{i=1}^{2} Σ_{K_j ∈ T_{j,Γ}} h_{K_j} C ( h_{K_j}^{-1} ∥∇w_j∥²_{K_j} + h_{K_j} ∥D_x^2 w_j∥²_{K_j} )    (by (A.17))
       ≤ C Σ_{i=1}^{2} Σ_{K_j ∈ T_{j,Γ}} ( ∥∇w_j∥²_{K_j} + h²_{K_j} ∥D_x^2 w_j∥²_{K_j} )
       ≤ C Σ_{i=1}^{2} Σ_{K ∈ T_{j,Ω_i}} ( ∥∇w_j∥²_K + h²_K ∥D_x^2 w_j∥²_K ).    (B.5)

The third term in (B.3) receives the same treatment, and thus

    III = ∥[w]∥²_{1/2,h,Γ} = Σ_{K_0 ∈ T_{0,Γ}} h_{K_0}^{-1} ∥[w]∥²_{Γ_{K_0}}
        ≤ Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0}^{-1} ∥w_1∥²_{Γ_{K_0}} + Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0}^{-1} ∥w_2∥²_{Γ_{K_0}}
        ≤ Σ_{K_0 ∈ T_{0,Γ}} 2 h_{K_0}^{-1} ∥w_1∥²_{Γ_{K_0}} + C Σ_{K_G ∈ T_{G,Γ}} 2 h_{K_G}^{-1} ∥w_2∥²_{Γ_{K_G}}    (by (A.22))
        ≤ C Σ_{i=1}^{2} Σ_{K_j ∈ T_{j,Γ}} h_{K_j}^{-1} ∥w_i∥²_{Γ_{K_j}}
        ≤ C Σ_{i=1}^{2} Σ_{K_j ∈ T_{j,Γ}} h_{K_j}^{-1} C ( h_{K_j}^{-1} ∥w_j∥²_{K_j} + h_{K_j} ∥∇w_j∥²_{K_j} )    (by (A.17))
        ≤ C Σ_{i=1}^{2} Σ_{K_j ∈ T_{j,Γ}} ( h_{K_j}^{-2} ∥w_j∥²_{K_j} + ∥∇w_j∥²_{K_j} )
        ≤ C Σ_{i=1}^{2} Σ_{K ∈ T_{j,Ω_i}} ( h_K^{-2} ∥w_j∥²_K + ∥∇w_j∥²_K ).    (B.6)

The fourth term in (B.3) is

    IV = ∥[∇w]∥²_{Ω_O} ≤ 2 ∥(∇w)_1∥²_{Ω_O} + 2 ∥(∇w)_2∥²_{Ω_O} = C Σ_{i=1}^{2} ∥(∇w)_i∥²_{Ω_O} = C Σ_{i=1}^{2} Σ_{K ∈ T_{0,Γ}} ∥∇w_j∥²_{K ∩ Ω_2}
       ≤ C Σ_{i=1}^{2} Σ_{K ∈ T_{0,Γ}} ∥∇w_j∥²_K ≤ C Σ_{i=1}^{2} Σ_{K ∈ T_{j,Ω_i}} ∥∇w_j∥²_K.    (B.7)

We are now done with the separate treatments of all the terms in (B.3). Summing up what we have, i.e., using (B.4)–(B.7) in (B.3), we get
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' we get |||w|||2 Ah,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='t ≤ 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi ∥∇wj∥2 K � �� � ≥ I + C 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi � ∥∇wj∥2 K + h2 K∥D2 xwj∥2 K � � �� � ≥ II + C 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi � h−2 K ∥wj∥2 K + ∥∇wj∥2 K � � �� � ≥ III + C 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi ∥∇wj∥2 K � �� � ≥ IV ≤ C 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi � h−2 K ∥wj∥2 K + ∥∇wj∥2 K + h2 K∥D2 xwj∥2 K � = C 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi � h−2 K ∥v − πh,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='jv∥2 K + ∥∇(v − πh,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='jv)∥2 K + h2 K∥D2 x(v − πh,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='jv)∥2 K � 4th ≤ C 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi � h−2 K � h2(p+1)∥Dp+1 x v∥2 N(K) � + � h2p∥Dp+1 x v∥2 N(K) � + h2 K � h2(p−1)∥Dp+1 x v∥2 N(K) �� ≤ Ch2p 2 � i=1 � K∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='Ωi ∥Dp+1 x v∥2 N(K) ≤ Ch2p 2 � i=1 ∥Dp+1 x v∥2 Ω0 = Ch2p∥Dp+1 x v∥2 Ω0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8) where we have used standard local interpolation error estimates for Scott-Zhang interpo- lation operators in the fourth step, thus N(K) denotes the neighborhood of simplex K, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=', all adjacent simplices to and including K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' Taking the square root of both sides gives (B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 63 Lemma B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 (An interpolation error estimate in |||·|||An).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' For n = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' , N, let |||·|||An and Ih,n = Ih,tn be defined by (4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='32) and (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='1), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' Then there exists a constant C > 0 such that |||v − Ih,nv|||An ≤ Chp∥Dp+1 x v∥Ω0, ∀v ∈ Hp+1(Ω0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' Letting w = v − Ih,nv, and plugging w into |||·|||2 An, we have |||w|||2 An = |||w|||2 An + ∥⟨∂nw⟩∥2 −1/2,h,Γn−1 = |||w|||2 An + ∥⟨∂nw⟩∥2 −1/2,h,Γn−1∩Γn + ∥⟨∂nw⟩∥2 −1/2,h,Γn−1\\Γn ≤ |||w|||2 An + |||w|||2 An + ∥⟨∂nw⟩∥2 −1/2,h,Γn−1\\Γn = C |||w|||2 An + ∥⟨∂nw⟩∥2 −1/2,h,Γn−1\\Γn (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='10) The second term in the last row is initially treated in the same way as its counterpart in (4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='39).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' We thus partition Γn−1 \\ Γn into `Γi := (Γn−1 \\ Γn) ∩ Ωi,n, use the interdependent indices i and j, and write `ΓiKj = Kj ∩ `Γi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' Letting wj = v − πh,jv, we have ∥⟨∂nw⟩∥2 −1/2,h,Γn−1\\Γn (4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='39) ≤ C � `ΓiKj hKj � ∥(∇wj)+∥2 `ΓiKj + ∥(∇wj)−∥2 `ΓiKj � = C � `ΓiKj � σ∈{+,−} hKj∥(∇wj)σ∥2 `ΓiKj (A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='17) ≤ C � `ΓiKj � σ∈{+,−} hKjC � h−1 Kσ j ∥∇wj∥2 Kσ j + hKσ j ∥D2 xwj∥2 Kσ j � ≤ C � `ΓiKj � σ∈{+,−} � ∥∇wj∥2 Kσ j + h2 Kσ j ∥D2 xwj∥2 Kσ j � ≤ C 2 � i=1 � K∈Tj,Ωi,n � ∥∇wj∥2 K + h2 K∥D2 xwj∥2 K � (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='8) ≤ Ch2p∥Dp+1 x v∥2 Ω0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='11) We thus have |||w|||2 An (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='10) ≤ C |||w|||2 An + ∥⟨∂nw⟩∥2 −1/2,h,Γn−1\\Γn (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='11) ≤ C |||w|||2 An + Ch2p∥Dp+1 x v∥2 Ω0 = C |||v − Ih,nv|||2 An + Ch2p∥Dp+1 x v∥2 Ω0 (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2) ≤ Ch2p∥Dp+1 x v∥2 Ω0 + Ch2p∥Dp+1 x v∥2 Ω0 = Ch2p∥Dp+1 x v∥2 Ω0, (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='12) which shows (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='9).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 64 B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='2 Temporal interpolation operator For q ∈ N and n = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' , N, we define the temporal interpolation operator ˜In = ˜In,q : C(In) → Pq(In) by (˜Inv)− n = v− n , (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='13a) and with the additional condition for q ≥ 1, � In ˜Invw dt = � In vw dt, ∀w ∈ Pq−1(In).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='13b) Lemma B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='3 (An interpolation error estimate in ∥ · ∥Ω0,In).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' Let ˜In be defined by (B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content='13).' 
Then, for $q = 0, 1$, there exists a constant $C > 0$ such that for any function $v : \Omega_0\times I_n \to \mathbb{R}$ with sufficient spatial and temporal regularity we have that $\tilde{I}_n$ is bounded and that
$$
\|v - \tilde{I}_n v\|_{\Omega_0,I_n} \leq Ck_n^{q+1}\|\dot{v}^{(q+1)}\|_{\Omega_0,I_n},
\tag{B.14}
$$
where $\|v\|_{\Omega_0,I_n} = \max_{t\in I_n}\|v\|_{\Omega_0}$, $k_n = t_n - t_{n-1}$, and $\dot{v}^{(q+1)} = \partial^{q+1}v/\partial t^{q+1}$.

Proof. We start by deriving explicit expressions for $\tilde{I}_n v$, involving $w$, for $q = 0, 1$. From these explicit expressions, boundedness of $\tilde{I}_n$ will follow. We then use these expressions to derive estimates for $v - \tilde{I}_n v$, from which (B.14) will be derived.

Case $q = 0$. For $q = 0$, and any $(x, t) \in \Omega_0 \times I_n$,
$$
(\tilde{I}_n v)(x, t) = v(x, t_n^{-}),
\tag{B.15}
$$
from (B.13a). The identity (B.15) indicates that $\tilde{I}_n$ is bounded for $q = 0$. This can easily be seen by, e.g., assuming $v$ to be continuous in time on $I_n$. Using (B.15), we have for any $(x, t) \in \Omega_0 \times I_n$
$$
(v - \tilde{I}_n v)(x, t) = v(x, t) - v(x, t_n^{-}) = -\int_{t}^{t_n}\dot{v}(x, s)\,\mathrm{d}s \leq \int_{I_n}|\dot{v}(x, t)|\,\mathrm{d}t.
\tag{B.16}
$$
By taking the squared $L^2(\Omega_{i,n})$-norm of $v - \tilde{I}_n v$, we obtain for any $t \in I_n$ that
$$
\|v - \tilde{I}_n v\|_{\Omega_0}^{2}
= \int_{\Omega_0}|(v - \tilde{I}_n v)(x, t)|^{2}\,\mathrm{d}x
\overset{\text{(B.16)}}{\leq} \int_{\Omega_0}\left|\int_{I_n}|\dot{v}(x, t)|\,\mathrm{d}t\right|^{2}\mathrm{d}x
\leq \int_{\Omega_0} k_n\int_{I_n}|\dot{v}(x, t)|^{2}\,\mathrm{d}t\,\mathrm{d}x
= k_n\int_{I_n}\|\dot{v}\|_{\Omega_0}^{2}\,\mathrm{d}t
\leq k_n^{2}\|\dot{v}\|_{\Omega_0,I_n}^{2}.
\tag{B.17}
$$
Taking the square root of both sides of (B.17) and the maximum over $I_n$ of the left-hand side, since (B.17) holds for all $t \in I_n$, proves (B.14) for $q = 0$.

Case $q = 1$. For $q = 1$, the procedure is a little bit trickier. We start by considering the following integral for any $x \in \Omega_0$:
$$
\int_{I_n}(t - t_n)\,\partial_t(\tilde{I}_n v)(x, t)\,\mathrm{d}t
= \partial_t(\tilde{I}_n v)(x, t_n^{-})\int_{I_n}(t - t_n)\,\mathrm{d}t
= -\tfrac{1}{2}k_n^{2}\,\partial_t(\tilde{I}_n v)(x, t_n^{-}),
\tag{B.18}
$$
where we have used the fact that $\partial_t(\tilde{I}_n v)$ is constant in time on $I_n$ for $q = 1$. We may also use this fact to treat the integral as
$$
\int_{I_n}(t - t_n)\,\partial_t(\tilde{I}_n v)(x, t)\,\mathrm{d}t
= \int_{I_n}(t - t_n)\,\frac{(\tilde{I}_n v)(x, t) - (\tilde{I}_n v)(x, t_n^{-})}{t - t_n}\,\mathrm{d}t
= \int_{I_n}(\tilde{I}_n v)(x, t)\,\mathrm{d}t - \int_{I_n}(\tilde{I}_n v)(x, t_n^{-})\,\mathrm{d}t
= \int_{I_n}v(x, t)\,\mathrm{d}t - \int_{I_n}v(x, t_n^{-})\,\mathrm{d}t
= \int_{I_n}v(x, t) - v(x, t_n^{-})\,\mathrm{d}t,
\tag{B.19}
$$
where we have used (B.13) to obtain the last equality. By Taylor expansion in time of $\tilde{I}_n v$ at $t_n^{-}$, we have for any $(x, t) \in \Omega_0 \times I_n$
$$
(\tilde{I}_n v)(x, t)
= (\tilde{I}_n v)(x, t_n^{-}) + (t - t_n)\,\partial_t(\tilde{I}_n v)(x, t_n^{-})
= v(x, t_n^{-}) - \frac{2(t - t_n)}{k_n^{2}}\int_{I_n}v(x, t) - v(x, t_n^{-})\,\mathrm{d}t,
\tag{B.20}
$$
where we have used (B.13a), and combined (B.18) with (B.19) to obtain the last equality. The identity (B.20) indicates that $\tilde{I}_n$ is bounded for $q = 1$. This can be seen by, e.g., assuming $v$ to be continuously differentiable in time on $I_n$. Using (B.20), we have for any $(x, t) \in \Omega_0 \times I_n$ that
$$
(v - \tilde{I}_n v)(x, t)
= \underbrace{v(x, t) - v(x, t_n^{-})}_{=\,\mathrm{I}}
+ \underbrace{\frac{2(t - t_n)}{k_n^{2}}\int_{I_n}v(x, t) - v(x, t_n^{-})\,\mathrm{d}t}_{=\,\mathrm{II}}.
\tag{B.21}
$$
We consider the terms separately, starting with the first:
$$
\mathrm{I} = v(x, t) - v(x, t_n^{-})
= -\int_{t}^{t_n}\dot{v}(x, s)\,\mathrm{d}s
= \int_{t}^{t_n}(s - t)\,\partial_s^2 v(x, s)\,\mathrm{d}s - \Big[(s - t)\dot{v}(x, s)\Big]_{t}^{t_n}
= \int_{t}^{t_n}(s - t)\,\partial_s^2 v(x, s)\,\mathrm{d}s - (t_n - t)\dot{v}(x, t_n^{-}).
\tag{B.22}
$$
The second term in (B.21) is
$$
\begin{aligned}
\mathrm{II} &= \frac{2(t - t_n)}{k_n^{2}}\int_{I_n}v(x, t) - v(x, t_n^{-})\,\mathrm{d}t \\
&= \frac{2(t - t_n)}{k_n^{2}}\Bigg(\underbrace{\Big[(t - t_{n-1})\big(v(x, t) - v(x, t_n^{-})\big)\Big]_{t_{n-1}}^{t_n}}_{=\,0}
- \int_{I_n}(t - t_{n-1})\,\partial_t\big(v(x, t) - v(x, t_n^{-})\big)\,\mathrm{d}t\Bigg) \\
&= \frac{2(t_n - t)}{k_n^{2}}\int_{I_n}(t - t_{n-1})\dot{v}(x, t)\,\mathrm{d}t
= \frac{2(t_n - t)}{k_n^{2}}\Bigg(\Big[\frac{(t - t_{n-1})^{2}}{2}\dot{v}(x, t)\Big]_{t_{n-1}}^{t_n}
- \int_{I_n}\frac{(t - t_{n-1})^{2}}{2}\,\partial_t^2 v(x, t)\,\mathrm{d}t\Bigg) \\
&= \frac{2(t_n - t)}{k_n^{2}}\,\frac{k_n^{2}}{2}\,\dot{v}(x, t_n^{-})
- \frac{2(t_n - t)}{k_n^{2}}\int_{I_n}\frac{(t - t_{n-1})^{2}}{2}\,\partial_t^2 v(x, t)\,\mathrm{d}t
= (t_n - t)\dot{v}(x, t_n^{-}) - \frac{(t_n - t)}{k_n^{2}}\int_{I_n}(t - t_{n-1})^{2}\,\partial_t^2 v(x, t)\,\mathrm{d}t.
\end{aligned}
\tag{B.23}
$$
Using the identities (B.22) and (B.23) in (B.21), we have
$$
\begin{aligned}
(v - \tilde{I}_n v)(x, t)
&= \int_{t}^{t_n}(s - t)\,\partial_s^2 v(x, s)\,\mathrm{d}s
\underbrace{-\,(t_n - t)\dot{v}(x, t_n^{-}) + (t_n - t)\dot{v}(x, t_n^{-})}_{=\,0}
- \frac{(t_n - t)}{k_n^{2}}\int_{I_n}(t - t_{n-1})^{2}\,\partial_t^2 v(x, t)\,\mathrm{d}t \\
&= \int_{t}^{t_n}(s - t)\,\partial_s^2 v(x, s)\,\mathrm{d}s
- \frac{(t_n - t)}{k_n^{2}}\int_{I_n}(t - t_{n-1})^{2}\,\partial_t^2 v(x, t)\,\mathrm{d}t \\
&\leq \int_{t}^{t_n}|s - t|\,|\partial_s^2 v(x, s)|\,\mathrm{d}s
+ \frac{|t_n - t|}{k_n^{2}}\int_{I_n}|t - t_{n-1}|^{2}\,|\partial_t^2 v(x, t)|\,\mathrm{d}t \\
&\leq k_n\int_{I_n}|\partial_s^2 v(x, s)|\,\mathrm{d}s + k_n\int_{I_n}|\partial_t^2 v(x, t)|\,\mathrm{d}t
= 2k_n\int_{I_n}|\partial_t^2 v(x, t)|\,\mathrm{d}t.
\end{aligned}
\tag{B.24}
$$
By taking the squared $L^2(\Omega_{i,n})$-norm of $v - \tilde{I}_n v$, we obtain for any $t \in I_n$ that
$$
\|v - \tilde{I}_n v\|_{\Omega_0}^{2}
= \int_{\Omega_0}|(v - \tilde{I}_n v)(x, t)|^{2}\,\mathrm{d}x
\overset{\text{(B.24)}}{\leq} \int_{\Omega_0}\left|2k_n\int_{I_n}|\partial_t^2 v(x, t)|\,\mathrm{d}t\right|^{2}\mathrm{d}x
\leq \int_{\Omega_0}4k_n^{2}\,k_n\int_{I_n}|\partial_t^2 v(x, t)|^{2}\,\mathrm{d}t\,\mathrm{d}x
= 4k_n^{3}\int_{I_n}\|\dot{v}^{(2)}\|_{\Omega_0}^{2}\,\mathrm{d}t
\leq 4k_n^{4}\|\dot{v}^{(2)}\|_{\Omega_0,I_n}^{2}.
\tag{B.25}
$$
Taking the square root of both sides of (B.25) and the maximum over $I_n$ of the left-hand side, since (B.25) holds for all $t \in I_n$, proves (B.14) for $q = 1$. The proof of Lemma B.3 is thus complete.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} +page_content=' 69' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ZNFRT4oBgHgl3EQfPjcf/content/2301.13517v1.pdf'} diff --git a/ZtAyT4oBgHgl3EQfvvnv/content/2301.00638v1.pdf b/ZtAyT4oBgHgl3EQfvvnv/content/2301.00638v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0aae6c55e0969ec075d30a5ba494d291759c6268 --- /dev/null +++ b/ZtAyT4oBgHgl3EQfvvnv/content/2301.00638v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b55ef8bea08bc6eb193f7964cfccf2285da56f19e24ffb3305a787fdbecf1cc +size 2677851 diff --git a/ZtAyT4oBgHgl3EQfvvnv/vector_store/index.faiss b/ZtAyT4oBgHgl3EQfvvnv/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..87c271f0e4c386593bc1905c007811b0a736997b --- /dev/null +++ b/ZtAyT4oBgHgl3EQfvvnv/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d031e916fa106aaf33922a2fe6c2c54e87243373c9c9b7c07b0d8762335729ec +size 4718637 diff --git a/ZtAyT4oBgHgl3EQfvvnv/vector_store/index.pkl b/ZtAyT4oBgHgl3EQfvvnv/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f3215cc45bf16bf68077612b328fb8916ac67cde --- /dev/null +++ b/ZtAyT4oBgHgl3EQfvvnv/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ecc10c999343dcb81598327133206e7c4be71ed2286bc08adc46a6d7e9989d4 +size 152206 diff --git a/a9E4T4oBgHgl3EQfoQ0b/content/2301.05182v1.pdf b/a9E4T4oBgHgl3EQfoQ0b/content/2301.05182v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c30d89770d307b464afbb010b731840222545004 --- /dev/null +++ b/a9E4T4oBgHgl3EQfoQ0b/content/2301.05182v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6089b0d5d29fcdfdae1332bcdcc798f99a0d732dfb0e0346e7ade05bddfa2a7 +size 7801240 diff --git a/a9E4T4oBgHgl3EQfoQ0b/vector_store/index.faiss b/a9E4T4oBgHgl3EQfoQ0b/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..abb5ec6a5b9586f7ad1de292d119846f4278a3b8 --- /dev/null +++ b/a9E4T4oBgHgl3EQfoQ0b/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a4af168ca555fd4812dc039d4bc1a90228bcb8901dd9c9a8a35cf87dd85d212 +size 7995437 diff --git a/a9FLT4oBgHgl3EQfXi8l/vector_store/index.faiss b/a9FLT4oBgHgl3EQfXi8l/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..6683618e410ccc5b7f551a90fe1ecb40ea671af2 --- /dev/null +++ b/a9FLT4oBgHgl3EQfXi8l/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:640cd1216eeda730ff836c4dbd763fe7a6d0632c6f74b596dd2989b484cf726f +size 8192045 diff --git a/aNE3T4oBgHgl3EQfdAo0/vector_store/index.faiss b/aNE3T4oBgHgl3EQfdAo0/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..4f71b8f36b79a4510766dca94b6a8e9d07785b51 --- /dev/null +++ b/aNE3T4oBgHgl3EQfdAo0/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:545d78818cf95b6a21b3892d2e2c7bbca3fbc535641037704e47fd421dffd1a9 +size 6815789 diff --git a/b9FPT4oBgHgl3EQfBTTg/content/2301.12985v1.pdf b/b9FPT4oBgHgl3EQfBTTg/content/2301.12985v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3805142ba7f16e2b00f6cb46e9a3c4df1b5687dc --- /dev/null +++ 
b/b9FPT4oBgHgl3EQfBTTg/content/2301.12985v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8edce3ffceef7f60c933fe78af33eca03cdc9b65e09b5dd5836d08c29bfb06da +size 26647695 diff --git a/bNAyT4oBgHgl3EQfivjL/content/2301.00403v1.pdf b/bNAyT4oBgHgl3EQfivjL/content/2301.00403v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0ec6720ebfda660e4d1c26d849aa199057e13d49 --- /dev/null +++ b/bNAyT4oBgHgl3EQfivjL/content/2301.00403v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13fe12f724242c100268f3bb6e399bd31212f074d901e4f2cc08a8cca86c7a66 +size 2825951 diff --git a/bNAyT4oBgHgl3EQfivjL/vector_store/index.pkl b/bNAyT4oBgHgl3EQfivjL/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..764b33e83fe4df87b09f36d99c3b41c092b579f0 --- /dev/null +++ b/bNAyT4oBgHgl3EQfivjL/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8532e4b92aeaf584ccb75133f64ec19b61871b670ecb48ece58f17bcab5d5f1c +size 83687 diff --git a/bNAzT4oBgHgl3EQfnP3n/content/tmp_files/2301.01579v1.pdf.txt b/bNAzT4oBgHgl3EQfnP3n/content/tmp_files/2301.01579v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..49dc9015d2bc2549000c902bf9fa17849ae0a64b --- /dev/null +++ b/bNAzT4oBgHgl3EQfnP3n/content/tmp_files/2301.01579v1.pdf.txt @@ -0,0 +1,2875 @@ +Learning Ambiguity from Crowd Sequential Annotations +Xiaolei Lu 1 +1 City University of Hong Kong +Abstract +Most crowdsourcing learning methods treat disagreement be- +tween annotators as noisy labelings while inter-disagreement +among experts is often a good indicator for the ambiguity +and uncertainty that is inherent in natural language. In this +paper, we propose a framework called Learning Ambiguity +from Crowd Sequential Annotations (LA-SCA) to explore +the inter-disagreement between reliable annotators and effec- +tively preserve confusing label information. First, a hierarchi- +cal Bayesian model is developed to infer ground-truth from +crowds and group the annotators with similar reliability to- +gether. By modeling the relationship between the size of group +the annotator involved in, the annotator’s reliability and ele- +ment’s unambiguity in each sequence, inter-disagreement be- +tween reliable annotators on ambiguous elements is computed +to obtain label confusing information that is incorporated to +cost-sensitive sequence labeling. Experimental results on POS +tagging and NER tasks show that our proposed framework +achieves competitive performance in inferring ground-truth +from crowds and predicting unknown sequences, and inter- +preting hierarchical clustering results helps discover labeling +patterns of annotators with similar reliability. +Introduction +Sequence labeling, which refers to assign sequences of la- +bels to observed sequential data, is widely used in Natural +Language Processing (NLP) tasks including Part-of-Speech +(POS) tagging, Chunking and Named Entity Recognition +(NER). Many downstream NLP applications (e.g. relation ex- +traction and machine translation ) can benefit from sequential +label assignments of these fundamental NLP tasks. +Traditional sequence labeling models like Hidden Markov +Models (HMMs) and Conditional Random Fields (CRFs) +require handcrafted features which need to be carefully de- +signed to obtain good results on a specific dataset. Over +the past decade, deep sequence models have resulted in im- +proving the performance of sequence labeling. 
For example, Bi-LSTM-CRF (Huang, Xu, and Yu 2015) and Transformer (Vaswani et al. 2017). However, these sequence labeling models require a large amount of training data with exact annotations, which is costly and laborious to produce.
In recent years, well-developed commercial crowdsourcing platforms (e.g. Amazon Mechanical Turk and CrowdFlower (Finin et al. 2010)) have flourished as effective tools to obtain large labeled datasets. Crowdsourcing draws on the intelligence of the group, but the quality of crowd labels still cannot be guaranteed because the expertise of annotators varies. Therefore the major focus of learning from crowds is on estimating the reliability of annotators and building prediction models based on the estimated ground-truth labels. For example, Snow et al. (2008) used bias correction to combine non-expert annotations. Raykar et al. (2010) proposed to jointly estimate the coefficients of a logistic regression classifier and the annotators' expertise.
Many effective models such as HMM-Crowd (Nguyen et al. 2017) and Sembler (Wu, Fan, and Yu 2012) extend crowdsourcing to sequence labeling, which enables better aggregation of crowd sequential annotations. But these approaches measure the quality of crowd labels under the assumption of a single ground-truth. As a result, disagreement between annotators has to be treated as noisy labeling. However, research in the NLP field shows that inter-disagreement among experts can be a good indicator of the ambiguity and uncertainty that is inherent in language (Plank, Hovy, and Søgaard 2014b). For linguistically hard cases there is often no single correct answer. As shown in Figure 1, "like" can be tagged as a conjunction or an adjective. Furthermore, inter-disagreement between experts can reveal confusing label information, which is related to the distribution of hard cases over label pairs. Figure 2 shows the label confusion matrix for a POS tagging task, where "ADJ" (adjectives) and "NOUN" (nouns) are more likely to be confused. Wisely incorporating confusing label information into supervised learning can make the classifier more robust (Plank, Hovy, and Søgaard 2014b). However, existing crowd sequential models do not take inter-disagreement between annotators into account.

Figure 1: Two examples of doubly-annotated Twitter POS tagging data by different experts.
Figure 2: Label confusion matrix derived from two gold annotations of 500 Twitter POS tagging samples.

To explore the inter-disagreement between reliable annotators and effectively preserve confusing label information, in this paper we propose a framework called Learning Ambiguity from Crowd Sequential Annotations (LA-SCA). Our contributions can be summarized as follows:
First, we develop a hierarchical Bayesian model to group annotators into different clusters. By imposing a hierarchical prior on the confusion matrix that describes the reliability of annotators in the same cluster, the model allows annotators that belong to the same cluster to be characterized by different but similar reliabilities, which aims to preserve inter-disagreement between reliable annotators.
Second, a low-rank model is formulated to model the relationship between the size of the group an annotator falls into, the annotator's reliability, and each element's unambiguity in a sequence.
Then the inter-disagreement between reliable annotators on ambiguous elements can be obtained to compute the label confusion matrix.
Third, a cost-sensitive mechanism is combined with sequence labeling so that label sequences that contain the ground-truth but are more easily confused with it incur a lower Hamming loss, which aims to improve the robustness of the sequence model.

Related Work
Hidden Markov Models (HMMs) (Juang and Rabiner 1991; Qiao et al. 2015) and Conditional Random Fields (CRFs) (Lafferty, McCallum, and Pereira 2001; Sarawagi and Cohen 2004) form the most popular generative-discriminative pair for sequence labeling. With the great success of deep learning models, the combination of deep learning and graphical models has received increasing attention (Chu et al. 2016). For example, Bi-LSTM-CRF (Huang, Xu, and Yu 2015) was proposed to efficiently encode past and future input features by combining a bidirectional LSTM network and a CRF layer. Furthermore, Transformer (Vaswani et al. 2017) was proposed with an attention mechanism to learn long-range dependencies, which demonstrates significant improvements in efficiency and performance. However, both traditional and deep learning models require a large amount of training data with exact annotations, which is financially expensive and prohibitively time-consuming. Incorporating semi-supervised learning into sequence labeling models (e.g. semi-supervised CRFs and semi-supervised SVMs) can partly lighten the burden of sequential annotation, but this learning mechanism still needs exact labelings.
Crowdsourcing provides an effective tool to collect large labeled datasets. Existing crowdsourcing learning models can be grouped into two types: wrapper and joint models. The former uses the ground-truths inferred from crowds for subsequent classifier learning, while joint models simultaneously estimate annotators' reliability and learn the prediction model. The Dawid and Skene (DS) (Dawid and Skene 1979) aggregation model and its variants (e.g. GLAD (Whitehill et al. 2009)) explore different ways to model the relationship between the ground-truth, annotators' reliability and the corresponding annotations, and then use the Expectation Maximization (EM) approach to estimate the ground-truth labels and annotators' reliability. The Sembler (Wu, Fan, and Yu 2012) and HMM-Crowd (Nguyen et al. 2017) models were proposed to aggregate multiple annotations to learn a sequence model. Simpson and Gurevych (2018) further took label dependency in sequential annotation into consideration and used a Bayesian approach to model crowd sequential labels. It should be noted that the above crowdsourcing learning models assume only one ground-truth and do not consider the inter-disagreement among annotators. As a result, these models fail to capture the inherent semantic ambiguity in NLP tasks and to preserve confusing label information.
To explore inter-disagreement between annotators, Plank, Hovy, and Søgaard (2014a) derived a label confusion matrix from doubly gold annotations and showed that a POS tagging classifier sensitive to confusing label information is more robust. Soberón et al. (2013) proposed the CrowdTruth methodology to model ambiguity in semantic interpretation and treat all reliable annotators' labelings on ambiguous cases as high-quality annotations.
Dumitrache, Aroyo, and Welty (2019) used ambiguity-aware ground-truths to train a classifier for open-domain relation extraction, and the results showed that ambiguity-aware crowds are better than experts regarding the quality and efficiency of annotation. However, CrowdTruth-based models only preserve multiple ground-truths for ambiguous instances and ignore the confusing label information that can benefit robust classifier learning.
In recent years multi-label crowdsourcing has been developed to identify multiple true labels from crowds for multi-label tasks. Different from discovering inter-disagreement between annotators in single-label crowdsourcing, most multi-label crowdsourcing methods assume that multiple ground-truths are assigned by one annotator. For example, Zhang and Wu (2018) extended a generative single-label crowdsourcing method by incorporating the correlation among labels, while Li et al. (2018) further utilized neighbors' annotations and the effort-saving annotating behavior of each annotator to jointly estimate annotators' expertise and a multi-label classifier. There has also been research exploring multi-label crowd consensus (Yu et al. 2020; Tu et al. 2020) under the assumption that reliable annotators share the same label correlations, which, however, fails to preserve inter-disagreement among reliable annotators.

Proposed framework
The proposed framework LA-SCA contains three parts: inferring ground-truths and reliable annotators by hierarchical modeling of crowds; obtaining confusing label information from the inter-disagreement between reliable annotators on ambiguous elements via a low-rank model; and incorporating label confusion information into cost-sensitive sequence labeling. The details are described as follows.

Hierarchical Modeling for Crowd Annotations
Let $Y = \{y_{i1}, y_{i2}, \ldots, y_{iL}\}_{i=1}^{N}$ denote the crowd annotations provided by $L$ annotators over $N$ instances. Each annotator $l$ belongs to a cluster $c \in \{1, 2, \ldots, C\}$ and is characterized by a confusion matrix $\Psi^{c_l} \in [0, 1]^{T \times T}$, where $T$ denotes the size of the label set for $Y$.
We assume that annotators in the same cluster have similar reliability but their annotations may differ. For example, annotators with lower reliability will provide varied annotations when labeling a specific instance, while reliable annotators have different opinions on ambiguous instances. To preserve disagreement between the annotators in the same cluster, we use the following hierarchical prior on each row of the confusion matrix $\Psi^{c_l}$:
$$
\eta^{c}_{t} \sim \mathrm{Exponential}(\lambda_t), \quad \eta^{c}_{t} > 0, \tag{1}
$$
$$
\beta^{c}_{t} \sim \mathrm{Dirichlet}(\alpha_t), \quad 0 < \beta^{c}_{tj} < 1, \ \sum_{j=1}^{T}\beta^{c}_{tj} = 1, \tag{2}
$$
$$
\Psi^{c_l}_{t} \sim \mathrm{Dirichlet}(\eta^{c}_{t}\beta^{c}_{t}), \tag{3}
$$
$$
y_{il} \sim \mathrm{Multinomial}(\Psi^{c_l}_{z_i}), \tag{4}
$$
where $c_l$ denotes the cluster to which annotator $l$ belongs and $z_i$ is the ground-truth label of the $i$th instance. $\eta^{c}_{t}$ and $\beta^{c}_{t}$ can be understood as the precision and mean of $\Psi^{c_l}_{t}$, respectively.
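To make the generative process in (1)–(4) concrete, the following minimal sketch simulates crowd annotations under this hierarchical prior. It is an illustration only: the NumPy-based sampling, the variable names, and the uniform choices for the cluster and ground-truth distributions are assumptions of the sketch, not details given in the paper.

```python
import numpy as np

rng = np.random.default_rng(0)

T, C, L, N = 5, 3, 10, 200                 # labels, clusters, annotators, instances
lam = np.ones(T)                           # rate parameters lambda_t of the Exponential priors
alpha = np.ones((T, T)) + 4 * np.eye(T)    # Dirichlet parameters alpha_t, favoring the correct label

# Cluster-level precision eta^c_t and mean beta^c_t, eqs. (1)-(2)
eta = rng.exponential(scale=1.0 / lam, size=(C, T))
beta = np.stack([[rng.dirichlet(alpha[t]) for t in range(T)] for _ in range(C)])

# Annotator-level confusion matrices Psi^{c_l}, eq. (3): rows drawn around the cluster mean
cluster_of = rng.integers(C, size=L)       # c_l, drawn uniformly here for simplicity
psi = np.stack([
    np.stack([rng.dirichlet(eta[c, t] * beta[c, t]) for t in range(T)])
    for c in cluster_of
])

# Ground truths and crowd labels, eq. (4)
z = rng.integers(T, size=N)                # z_i, drawn uniformly here for simplicity
y = np.stack([[rng.choice(T, p=psi[l, z[i]]) for l in range(L)] for i in range(N)])
print(y.shape)                             # (N, L): one label per instance per annotator
```

Because all annotators in a cluster share the same $\eta^{c}_{t}\beta^{c}_{t}$ but draw their own rows, their confusion matrices are similar without being identical, which is exactly the disagreement the prior is meant to preserve.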
In addition, the cluster assignment $c_l$ and the ground truth $z_i$ follow multinomial distributions:

c_l \sim \mathrm{Multinomial}(\nu),   (5)
z_i \sim \mathrm{Multinomial}(\gamma),   (6)

where $\nu$ and $\gamma$ are sampled from $\mathrm{Dirichlet}(\epsilon_\nu)$ and $\mathrm{Dirichlet}(\epsilon_\gamma)$, respectively.

We employ collapsed Gibbs sampling (Griffiths and Steyvers 2004; Lakkaraju et al. 2015) to estimate the conditional distributions over the hidden variables $z_i$ and $c_l$ (more computational details can be found in Technical Appendix A). Let $c$ and $z$ denote the cluster assignments and true labels, respectively; $c_{-l}$ indicates that annotator $l$ is excluded from the cluster assignments and $z_{-i}$ excludes the $i$th instance. The conditional distribution of the cluster assignment of annotator $l$ given the remaining variables is

p(c_l = c \mid c_{-l}, z, Y, \eta, \beta) \propto p(c_l = c \mid c_{-l}) \, p(Y^l \mid Y^{-l}, z, c, \eta, \beta)
\propto (n^{-l}_c + \epsilon_\nu / C) \prod_t \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_s \frac{\Gamma(n_{lts} + \eta^c_t \beta^c_{ts})}{\Gamma(\eta^c_t \beta^c_{ts})},   (7)

where $n^{-l}_c$ denotes the number of annotators (excluding $l$) assigned to cluster $c$, $n_{lt}$ is the number of instances annotated by $l$ whose true label is $t$, and $n_{lts}$ is the number of instances annotated with label $s$ by $l$ whose true label is $t$.

Similarly, $p(z_i = t \mid z_{-i}, Y, c, \eta, \beta)$ is given as

p(z_i = t \mid z_{-i}, Y, c, \eta, \beta) \propto p(z_i = t \mid z_{-i}) \, p(y_i \mid Y^{-i}, z_i = t, z_{-i}, c, \eta, \beta)
\propto (n^{-i}_t + \epsilon_\gamma / T) \prod_c \prod_{c_l = c} \frac{\prod_s (n^{-i}_{lts} + \eta^c_t \beta^c_{ts})^{I(y_{il} = s)}}{n^{-i}_{lt} + \eta^c_t},   (8)

where $n^{-i}_t$ denotes the number of instances (excluding $i$) with true label $t$, $n^{-i}_{lt}$ the number of instances (excluding $i$) annotated by $l$ with true label $t$, and $n^{-i}_{lts}$ the number of instances (excluding $i$) annotated with label $s$ by $l$ with true label $t$.

Due to the non-conjugacy of the Exponential and Dirichlet priors with respect to the likelihood $p(Y \mid z, c, \eta, \beta)$, we use the Metropolis-Hastings (MH) algorithm (Chib and Greenberg 1995) to estimate the conditional posterior distributions $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c)$ and $p(\eta^c_t \mid \beta^c_t, Y^c)$ for each cluster, and a symmetric proposal distribution (i.e. a uniform distribution) is used to simulate candidate samples (algorithm details are presented in Technical Appendix C, Algorithms 1 and 2). $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c)$ is given as

p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c) \propto p(Y^{cj} \mid Y^{-cj}, \beta^c_t, \eta^c_t) \, p(\beta^c_{tj} \mid \beta^c_{t(\sim j)})
\propto \prod_{c_l = c} \left[ \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \frac{\Gamma(n_{ltt} + \eta^c_t \beta^c_{tt})}{\Gamma(\eta^c_t \beta^c_{tt})} \right] \times \left( \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tj} - 1} \left( 1 - \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tt} - 1},   (9)

where $\beta^c_t = [\beta^c_{t1}, \ldots, \beta^c_{tj}, \ldots, \beta^c_{tT}]$ and $u^c_{tj} = 1 - \sum_{s=1, s \neq t, s \neq j}^T \beta^c_{ts}$. The conditional posterior of $\beta^c_{tt}$ is obtained via $\beta^c_{tt} = 1 - \sum_{j=1, j \neq t}^T \beta^c_{tj}$. Derivation details of $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c)$ can be found in Technical Appendix B.

$p(\eta^c_t \mid \beta^c_t, Y^c)$ is defined as

p(\eta^c_t \mid \beta^c_t, Y^c) \propto \prod_{c_l = c} \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_j \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \times \lambda_t e^{-\lambda_t \eta^c_t}.   (10)

(Sabetpour et al. 2021)

By iteratively estimating $z_i$, $c_l$, $\beta^c_t$ and $\eta^c_t$ until convergence, annotators that have similar reliability can be grouped into the same cluster.
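For illustration, a minimal sketch of the unnormalized score in Eq. (7) for one collapsed Gibbs update of an annotator's cluster assignment is given below. The count bookkeeping (n_c_excl_l, n_lt, n_lts) and all names are assumptions, not the paper's implementation; log-Gamma is used for numerical stability.

import numpy as np
from scipy.special import gammaln

def cluster_assignment_logscore(c, n_c_excl_l, n_lt, n_lts, eta, beta, eps_nu, C):
    # Unnormalized log-probability of assigning annotator l to cluster c (Eq. 7).
    # n_lt[t]: #instances annotated by l with true label t; n_lts[t, s]: of those, #labeled s.
    # eta: shape (C, T); beta: shape (C, T, T), with beta[c, t] a distribution over observed labels.
    T = n_lts.shape[0]
    logp = np.log(n_c_excl_l[c] + eps_nu / C)
    for t in range(T):
        logp += gammaln(eta[c, t]) - gammaln(n_lt[t] + eta[c, t])
        logp += np.sum(gammaln(n_lts[t] + eta[c, t] * beta[c, t])
                       - gammaln(eta[c, t] * beta[c, t]))
    return logp

# One Gibbs step for annotator l: evaluate this score for every cluster c,
# exponentiate after subtracting the maximum, normalize, and resample c_l.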
Since inter-disagreement among experts can reveal linguistically ambiguous cases, to identify the cluster of reliable annotators we compute the shared confusion matrix of each cluster based on the estimated ground truths $z$. Using one ground truth per instance does not affect the estimation of the shared confusion matrix, as ambiguous cases make up only a very small part of the whole dataset (Plank, Hovy, and Søgaard 2014a). The entry of the shared confusion matrix $M^c \in \mathbb{R}^{T \times T}$ for cluster $c$ is defined as

M^c_{t,s} = \frac{\sum_i I(z_i = t) \sum_{l \in c} I(y_{il} = s)}{\sum_i I(z_i = t)},   (11)

and the high-reliability cluster is obtained as $\arg\max_c \sum_{t=1}^T M^c_{t,t} / T$.

Identifying Ambiguity via a Low-Rank Model
Based on the identified reliable annotators, to estimate the ambiguity degree of each element in a sequence we assume that, within the high-reliability cluster, annotators' decisions form small groups on ambiguous elements, while an annotator who is more reliable in labeling a sequence agrees with the other annotators on unambiguous elements. Inspired by the quantitative formula describing the relationship between group size, annotator reliability and task clarity (Tian and Zhu 2012), we construct an $L \times N^s$ matrix $A$ for the $s$th sequence, where $N^s$ is the length of the sequence. Each entry $A(l, N^s_j)$ denotes the size of the group that annotator $l$ joins when labeling the $j$th element of the $s$th sequence. We define

A(l, N^s_j) = \omega^s_l \times \mu^s_j,   (12)

where $\omega^s_l$ represents the reliability of annotator $l$ in labeling the $s$th sequence and $\mu^s_j$ is the degree of unambiguity of the $j$th element.

Intuitively, if the annotator is more reliable or the element is less ambiguous, the group size is larger. Thus we employ a rank-1 factorization to formulate the relationship between $A(l, N^s_j)$, $\omega^s_l$ and $\mu^s_j$. The degree of unambiguity of each element in the $s$th sequence is computed as

A^s = U \Lambda V^T,   (13)
\omega^s = U_{\cdot 1} \sqrt{\Lambda_{11}},   (14)
\mu^s = V_{\cdot 1} \sqrt{\Lambda_{11}},   (15)

where $\omega^s = [\omega^s_1, \omega^s_2, \ldots, \omega^s_L]$ and $\mu^s = [\mu^s_1, \mu^s_2, \ldots, \mu^s_{N^s}]$.

Identifying ambiguity then involves three steps:

a. Identify ambiguous elements. We rank the estimated degrees of unambiguity over the whole sequential dataset and choose an appropriate percentage $p$; elements whose values fall among the $p$ smallest are identified as ambiguous cases.

b. Compute inter-disagreement between annotators. For the identified ambiguous cases, the disagreement among reliable annotators provides multiple possible ground truths. Let $\{y_{jt}\}_{t=1}^{L'}$ ($L' \le L$) denote the set of labels assigned by annotators to the $j$th ambiguous element of the $s$th sequence; the score of $y_{jt}$ is defined as

S(y_{jt}) = \frac{\sum_{l=1}^L I(y_{jl} = y_{jt}) \, \omega^s_l}{\sum_{l=1}^L I(y_{jl} = y_{jt})}.   (16)

In practice, ambiguous instances have limited gold annotations (Plank, Hovy, and Søgaard 2014b). We therefore select the top two labels for each ambiguous element by $S(y_{jt})$ in descending order and combine them with the ground truth inferred by the hierarchical model.

c. Obtain confusing label information. A label confusion matrix $CF \in \mathbb{R}^{T \times T}$ describes the degree of confusion between label pairs. The entry $CF(i, j)$ is defined as the mean of $p(z(x) = i, z(x) = j)$ and $p(z(x) = j, z(x) = i)$, where

p(z(x) = i, z(x) = j) = \frac{\sum_k I(z(x_k) = i, z(x_k) = j)}{\sum_k I(z(x_k) = i)},   (17)

with $k$ indexing the $k$th element of the whole sequential dataset; $p(z(x) = j, z(x) = i)$ is computed in the same way.
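A compact sketch of steps (13)-(16), assuming NumPy, is given below; all function names are illustrative, and the leading singular vectors may need a sign flip so that reliabilities and unambiguity degrees come out non-negative.

import numpy as np

def rank1_reliability_unambiguity(A):
    # Eqs. (13)-(15): rank-1 SVD of the L x N_s group-size matrix A of one sequence.
    U, S, Vt = np.linalg.svd(A, full_matrices=False)
    omega = U[:, 0] * np.sqrt(S[0])   # Eq. (14): per-annotator reliability for this sequence
    mu = Vt[0, :] * np.sqrt(S[0])     # Eq. (15): per-element degree of unambiguity
    return omega, mu

def label_scores(labels_j, omega):
    # Eq. (16): score each candidate label of one ambiguous element by the mean
    # reliability of the annotators who chose it (labels_j[l] is annotator l's label).
    scores = {}
    for y in set(labels_j):
        mask = np.array([yl == y for yl in labels_j])
        scores[y] = omega[mask].sum() / mask.sum()
    return scores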
Cost-sensitive sequence labeling
Given a sequential dataset $\{x_i, z_i\}_{i=1}^N$, where the $z_i$ are the ground truths inferred via hierarchical Bayesian modeling, the traditional training criterion maximizes the likelihood of a conditional log-linear model, which does not distinguish among the incorrect outputs: all of them are penalized equally through normalization. To improve sequence labeling, we employ a cost-sensitive mechanism that incorporates confusing label information into training, so that a label sequence that is more easily confused with the ground truth incurs a lower cost. The objective of cost-sensitive sequence labeling is defined as

L_{CS}(\theta) = \sum_{i=1}^N \log \frac{\exp\{\theta^T f(x_i, z_i)\}}{\sum_{z_j} \mathrm{cost}(z_j, z_i) \exp\{\theta^T f(x_i, z_j)\}},   (18)

where $f(x_i, z_i)$ denotes the feature function and $\mathrm{cost}(z_j, z_i)$ measures the influence of confusing label information on the loss. A weighted Hamming loss defines $\mathrm{cost}(z_j, z_i)$:

\mathrm{cost}(z_j, z_i) = \frac{1}{K_i} \sum_{k=1}^{K_i} (1 - p(z_{jk}, z_{ik})) \cdot (z_{jk} \oplus z_{ik}),   (19)

where $K_i$ is the number of tokens in the $i$th sequence, $\oplus$ is the Boolean XOR operator, and $p(z_{jk}, z_{ik})$ is obtained from the label confusion matrix.

Experiments
We conduct experiments on POS tagging and NER for English. POS analysis is widely debated, with many hard cases that annotators disagree on (Plank, Hovy, and Søgaard 2014b), while in NER the definition and partition of named entities also remain arguable. In the following sections, we present quantitative results to investigate the effectiveness of our framework in inferring ground truths, predicting unknown sequences and preserving confusing label information.

Datasets
Currently published datasets cannot provide both crowd annotations and multiple gold annotations, so we employ multiple gold-annotated and crowd-annotated datasets as follows.

POS tagging: Most POS tagging datasets contain only one gold annotation, which fails to identify hard cases. We therefore use the three Twitter POS tagging datasets from the work on cost-sensitive POS tagging (Dumitrache, Aroyo, and Welty 2019), which include 500 tweets with doubly gold annotations (denoted T-DGA for simplicity), the RITTER-TEST dataset (118 tweets) and the INHOUSE dataset (200 tweets). We employ T-DGA as training data (its doubly gold annotations guarantee the existence of hard cases), and RITTER-TEST and INHOUSE as test sets.¹
¹ Both RITTER-TEST and INHOUSE have only one gold annotation.

NER: The CoNLL-2003 shared NER dataset (Sang and De Meulder 2003) is one of the most common benchmarks used in the NLP community for sequence labeling; it contains four types of entities: persons (PER), locations (LOC), organizations (ORG) and miscellaneous (MISC). Rodrigues et al. (Rodrigues, Pereira, and Ribeiro 2014) put 400 articles from CoNLL-2003 on Amazon's Mechanical Turk to collect crowd annotations. There are 47 annotators in total and the average number of annotators per article is 4.9. In this paper, after pre-processing these crowd-labeled data we select 3000 sentence-level sequences, and we use the CoNLL-2003 test data.²
² The CoNLL-2003 test set has only one gold annotation.

Baselines
We use the following six models that learn from crowd sequential data as baselines.

MVtoken (Sang and De Meulder 2003): The ground-truth label sequence is obtained by choosing the label with the most votes at the token level.

DS (Dawid and Skene 1979): The EM algorithm is employed to assign a weight to each vote at the token level.

MACE (Hovy et al. 2013): By including a binary latent variable that denotes if and when each annotator is spamming, the model can identify which annotators are trustworthy and produce the true label.
Sembler (Wu, Fan, and Yu 2012): The model extends crowdsourcing learning from the instance level to the sequence level and jointly estimates annotators' reliability and the sequence model.

HMM-Crowd (Nguyen et al. 2017): Based on HMMs, the model additionally represents the "crowd component" by including parameters for the label quality of annotators and crowd variables.

HC-CLL: To verify the effectiveness of cost-sensitive sequence labeling, we also train the sequence prediction model by maximizing the conditional log-likelihood.

Experimental setting
Synthetic crowd annotations: As T-DGA does not have real crowd annotations, we simulate annotators with different reliability by controlling the precision of their annotations. Since in practice the number of annotators is limited, we set the total number of annotators to 15 and arrange three different assignments: [5, 5, 5], [8, 4, 3] and [3, 4, 8]. In each assignment, three precision ranges, [0.9, 0.7], [0.7, 0.4] and [0.4, 0.1], indicate reliability from high to low levels.

LA-SCA framework: The optimal number of annotator clusters is selected in the range [2, 5] based on the Bayesian information criterion. $\lambda_t$ is set to 2. To reflect that crowd annotations are better than random labeling, the diagonal of $\alpha_t$ is set to 0.7 while the off-diagonal elements are set to 0.3. Furthermore, we select $p = 10\%$ to identify ambiguous elements.

Experimental results
Comparing with baselines. We evaluate the effectiveness of the proposed framework in inferring ground truths for the training data and predicting the test sets.

POS tagging task:
For simplicity, we denote the three crowd-annotation assignments [8, 4, 3], [5, 5, 5] and [3, 4, 8] as ca1, ca2 and ca3, respectively. Table 1 shows the accuracy of inferring ground truths on the T-DGA dataset (HC-CLL is identical to LA-SCA in inferring ground truths). Most crowd models achieve better performance as the proportion of high-quality annotations increases. The performance of each comparison model differs between Gold 1 and Gold 2, as these two gold annotations assign different labels to some tokens. In the case of low-quality annotations (i.e. ca3), the developed hierarchical Bayesian model effectively identifies the annotators with high reliability, which helps guide the estimation of ground truths and thus improves performance. DS and HMM-Crowd achieve competitive results, as iteratively estimating annotators' reliability and the ground truths alleviates the negative effect of low-quality annotations.

Table 1: Accuracy of inferring ground-truths for the T-DGA dataset (%).

Model      |        Gold1        |        Gold2
           |  ca1    ca2    ca3  |  ca1    ca2    ca3
MVtoken    | 91.82  90.84  83.70 | 81.94  81.81  73.20
DS         | 93.34  90.97  91.99 | 83.53  81.90  82.14
MACE       | 89.80  84.79  84.56 | 80.76  76.26  75.89
Sembler    | 93.05  89.58  85.78 | 83.36  80.22  76.99
HMM-Crowd  | 93.40  91.59  90.38 | 83.72  82.06  81.12
LA-SCA     | 93.30  92.59  92.22 | 83.47  81.73  83.71

Table 2 reports the F1 scores of the comparison methods on the RITTER-TEST and INHOUSE datasets. In general, a model that learns from higher-quality ground truths achieves better prediction performance. For the wrapper models that feed the inferred ground truths to the sequence model
(i.e. MVtoken, DS and MACE), prediction performance heavily depends on the quality of the estimated ground truths. Therefore, in the ca3 setting the F1 scores of the wrapper models (i.e. MVtoken, DS and MACE) are lower than those of the joint models (i.e. Sembler and HMM-Crowd). The developed hierarchical Bayesian model HC-CLL effectively identifies the high-reliability cluster, which enables stable performance in handling low-quality annotations. Compared with HC-CLL, LA-SCA achieves better results in the ca1 and ca2 settings; low-quality crowd annotations (i.e. ca3) fail to provide effective confusing label information, which is more likely to add noise to cost-sensitive sequence labeling and thus degrades prediction performance.

Table 2: Performance of the models on the RITTER-TEST and INHOUSE datasets (%).

Model      |     RITTER-TEST     |       INHOUSE
           |  ca1    ca2    ca3  |  ca1    ca2    ca3
MVtoken    | 59.35  58.72  58.72 | 53.15  52.29  48.03
DS         | 67.58  60.43  58.69 | 54.33  48.49  48.14
MACE       | 59.89  58.04  60.05 | 47.92  53.33  49.12
Sembler    | 59.82  61.74  60.53 | 49.65  50.22  49.93
HMM-Crowd  | 61.10  61.30  61.79 | 49.98  49.12  52.40
HC-CLL     | 61.18  65.12  62.57 | 52.97  54.55  52.88
LA-SCA     | 66.20  67.32  61.57 | 55.65  57.25  50.84

NER task:
In the NER task the class "O" accounts for a large proportion of all tokens, so we use F1 score instead of accuracy to report the performance of inferring ground truths for the training data of the CoNLL-2003 NER task. As shown in Table 3, the developed hierarchical Bayesian model achieves the best F1 score, and the DS model also achieves a competitive result. Table 3 also reports the performance of predicting labels for the test data. Due to the limited crowd-labeled training data, the overall performance of the comparison methods is well below previously reported results (Rodrigues, Pereira, and Ribeiro 2014). The proposed framework LA-SCA still outperforms the baselines, but only by a narrow margin. Since the cost-sensitive learning mechanism inevitably introduces label noise in the NER task, where only a few labels are really confusable with each other, directly maximizing the log-likelihood can be competitive with cost-sensitive maximization.

Table 3: Evaluation on the CoNLL-2003 NER task (%).

Model      | Infer ground-truths | Prediction
MVtoken    |       63.17         |   38.52
DS         |       65.32         |   39.21
MACE       |       60.07         |   37.10
Sembler    |       63.25         |   38.87
HMM-Crowd  |       63.44         |   39.31
HC-CLL     |       67.54         |   40.56
LA-SCA     |       67.54         |   41.56

Identifying ambiguous cases. In this section, we investigate the performance of LA-SCA in identifying ambiguous cases and preserving confusing label information. We present results on the T-DGA dataset (ca1 setting), as it provides a standard for comparison.

First, we measure the performance of identifying ambiguous cases with the following measures:

acc1 = (number of correctly identified ambiguous cases) / (number of all ambiguous cases),   (20)
acc2 = (number of correctly double-annotated ambiguous cases) / (number of all ambiguous cases),   (21)

and we obtain acc1 = 725/931 = 0.779 and acc2 = 614/931 = 0.660. It can be concluded that LA-SCA successfully identifies most of the ambiguous cases in T-DGA. We further present two examples from T-DGA with gold and derived labelings on ambiguous cases; as demonstrated in Figure 3, LA-SCA successfully identifies ambiguous cases with the confusing label pairs ["ADJ", "NOUN"] and ["DET", "ADV"].
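The confusing label information preserved this way can be summarized back into a matrix such as the derived matrices compared in Figure 4. A minimal sketch of Eq. (17) is given below, assuming each token carries the set of labels kept for it (inferred ground truth plus any selected alternatives); this representation and the names are illustrative, not the paper's code.

import numpy as np

def label_confusion_matrix(label_sets, T):
    # Build CF from per-token label sets; label ids are assumed to be 0..T-1.
    pair = np.zeros((T, T))   # tokens whose set contains both i and j
    count = np.zeros(T)       # tokens whose set contains i
    for labels in label_sets:
        labels = set(labels)
        for i in labels:
            count[i] += 1
            for j in labels:
                if j != i:
                    pair[i, j] += 1
    p = pair / np.maximum(count[:, None], 1)   # p(z(x)=i, z(x)=j) as in Eq. (17)
    return 0.5 * (p + p.T)                     # CF(i, j): mean of both directions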
Figure 3: Two examples from T-DGA with gold and derived labelings on ambiguous cases.

(a) Gold matrix. (b) Derived matrix (ca1). (c) Derived matrix (ca2). (d) Derived matrix (ca3).
Figure 4: Comparison between the gold and the derived label confusion matrices on T-DGA.

Figure 4 shows the gold label confusion matrix and the derived confusion matrices of the three settings, respectively.
(a) Cluster 1. (b) Cluster 2. (c) Cluster 3.
Figure 5: Shared confusion matrices of three clusters on the POS tagging task.

As crowds contain noisy label information, the agreement for some labels is lower than in the gold matrix, which may generate wrong confusing label information. For example, in the tweet "FollowerSale is most Trusted company to buy", the gold annotation of "to" is ["VERB"] while the derived confusing label set is ["VERB", "PRON"]. But the derived matrices also preserve confusing label information that is similar to the gold one. For example, adjectives ["ADJ"] are often confused with nouns ["NOUN"], and the ["X"] category is more likely to be confused with punctuation ["."] and nouns ["NOUN"].

Interpreting clusters
Clustering crowd annotations can help discover common patterns among annotators with similar reliability. Here we demonstrate how to interpret the shared confusion matrices of the estimated clusters in the POS tagging and NER tasks.

POS tagging task:
We choose the ca1 setting and present the shared confusion matrices of the three clusters. By reviewing the diagonal elements of the three shared confusion matrices in Figure 5, we can see that the developed hierarchical Bayesian model separates annotators with different reliability well. Cluster 1 contains the annotators with high reliability, whose average successful-identification value is above 0.7, while the annotators with lower reliability are grouped into the third cluster, whose average successful-identification value is below 0.3.

NER task:
We present the clustering results for the crowd annotations collected from AMT. Generally, these crowd annotations are of good quality.
It can be seen from Figure 6 that both clusters are reliable in assigning "I-PER", "O", "I-LOC" and "B-ORG". Cluster 1 contains the more reliable annotations, with an average diagonal value of 0.742, while the average diagonal value in cluster 2 is 0.535.

(a) Cluster 1. (b) Cluster 2.
Figure 6: Shared confusion matrices of clusters on the NER task.

Discussion
The major concern regarding the evaluation of LA-SCA is the need for a dataset with both crowd annotations and multiple gold annotations. For T-DGA, we have to simulate crowd annotations and use precision to indicate annotator reliability from a global view. As a result, simple cases may also be incorrectly annotated under the [0.9, 0.7] precision range, which is not in line with the behavior of reliable annotators. In the crowd-annotated CoNLL-2003 NER task, the number of annotators assigned per article is limited and the crowd annotations are of good quality, which more or less hinders the exploration of labeling diversity.

While the incomplete datasets partially limit the applicability of LA-SCA, the proposed hierarchical Bayesian modeling shows its competitiveness in inferring ground truths from real crowd annotations and from synthetic crowds with low reliability. As the cost-sensitive mechanism expects a sparse label confusion matrix in tasks (e.g. NER) where only a few labels are confused with each other, how to achieve significant improvements in predicting unknown sequences remains to be explored.

Conclusion
In this paper, we propose a framework called Learning Ambiguity from Crowd Sequential Annotations (LA-SCA) to explore the inter-disagreement between reliable annotators and to effectively preserve confusing label information for robust sequence classifier learning. Experimental results show that LA-SCA achieves competitive performance in inferring ground truths from crowds and predicting test sets. Furthermore, the identified clusters help interpret the labeling patterns of annotators with similar reliability, which can help task designers improve labeling guidelines.
Technical Appendix

A. Estimating the conditional distributions over the hidden variables z_i and c_l

a. The derivation of $p(c_l = c \mid c_{-l}, z, Y, \eta, \beta)$ is as follows:

p(c_l = c \mid c_{-l}, z, Y, \eta, \beta) \propto p(c_l = c \mid c_{-l}) \, p(Y^l \mid Y^{-l}, z, c, \eta, \beta),   (22)

where $p(c_l = c \mid c_{-l})$ is computed as

p(c_l = c \mid c_{-l}) = \frac{p(c_1, c_2, \ldots, c_l = c)}{p(c_1, c_2, \ldots, c_{l-1})}
= \frac{\frac{\Gamma(\epsilon_\nu)}{\Gamma(L + \epsilon_\nu)} \prod_{c=1}^C \frac{\Gamma(n_c + \epsilon_\nu / C)}{\Gamma(\epsilon_\nu / C)}}{\frac{\Gamma(\epsilon_\nu)}{\Gamma(L + \epsilon_\nu - 1)} \prod_{c=1}^C \frac{\Gamma(n^{-l}_c + \epsilon_\nu / C)}{\Gamma(\epsilon_\nu / C)}}
= \frac{\Gamma(L + \epsilon_\nu - 1)}{\Gamma(L + \epsilon_\nu)} \cdot \frac{\Gamma(n_c + \epsilon_\nu / C)}{\Gamma(n^{-l}_c + \epsilon_\nu / C)}
= \frac{n^{-l}_c + \epsilon_\nu / C}{L + \epsilon_\nu - 1}.   (23)

The likelihood $p(Y \mid z, c, \eta, \beta)$ is obtained by integrating out the variables $\Psi^{cl}$:

p(Y \mid z, c, \eta, \beta) = \prod_c \prod_{l \in c} \prod_t \int p(Y^{cl} \mid \Psi^{cl}_t) \, p(\Psi^{cl}_t \mid \eta^c_t \beta^c_t) \, d\Psi^{cl}_t
= \prod_c \prod_{l \in c} \prod_t \left[ \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_s \frac{\Gamma(n_{lts} + \eta^c_t \beta^c_{ts})}{\Gamma(\eta^c_t \beta^c_{ts})} \right].   (3)

Then, via the Bayesian rule, we obtain the conditional distribution

p(Y^l \mid Y^{-l}, z, c, \eta, \beta) = \prod_t \left[ \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_s \frac{\Gamma(n_{lts} + \eta^c_t \beta^c_{ts})}{\Gamma(\eta^c_t \beta^c_{ts})} \right].   (4)

b. The derivation of $p(z_i = t \mid z_{-i}, Y, c, \eta, \beta)$ is as follows:

p(z_i = t \mid z_{-i}, Y, c, \eta, \beta) \propto p(z_i = t \mid z_{-i}) \, p(y_i \mid Y^{-i}, z_i = t, z_{-i}, c, \eta, \beta),   (5)

where $p(z_i = t \mid z_{-i})$ is computed as

p(z_i = t \mid z_{-i}) = \frac{p(z_1, z_2, \ldots, z_i = t)}{p(z_1, z_2, \ldots, z_{i-1})}
= \frac{\frac{\Gamma(\epsilon_\gamma)}{\Gamma(N + \epsilon_\gamma)} \prod_{t=1}^T \frac{\Gamma(n_t + \epsilon_\gamma / T)}{\Gamma(\epsilon_\gamma / T)}}{\frac{\Gamma(\epsilon_\gamma)}{\Gamma(N + \epsilon_\gamma - 1)} \prod_{t=1}^T \frac{\Gamma(n^{-i}_t + \epsilon_\gamma / T)}{\Gamma(\epsilon_\gamma / T)}}
= \frac{\Gamma(N + \epsilon_\gamma - 1)}{\Gamma(N + \epsilon_\gamma)} \cdot \frac{\Gamma(n_t + \epsilon_\gamma / T)}{\Gamma(n^{-i}_t + \epsilon_\gamma / T)}
= \frac{n^{-i}_t + \epsilon_\gamma / T}{N + \epsilon_\gamma - 1},   (6)

and $p(y_i \mid Y^{-i}, z_i = t, z_{-i}, c, \eta, \beta)$ is obtained as

p(y_i \mid Y^{-i}, z_i = t, z_{-i}, c, \eta, \beta)
= \prod_c \prod_{l \in c} \frac{\frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_s \frac{\Gamma(n_{lts} + \eta^c_t \beta^c_{ts})}{\Gamma(\eta^c_t \beta^c_{ts})}}{\frac{\Gamma(\eta^c_t)}{\Gamma(n^{-i}_{lt} + \eta^c_t)} \prod_s \frac{\Gamma(n^{-i}_{lts} + \eta^c_t \beta^c_{ts})}{\Gamma(\eta^c_t \beta^c_{ts})}}
= \prod_c \prod_{l \in c} \frac{\prod_s (n^{-i}_{lts} + \eta^c_t \beta^c_{ts})^{I(y_{il} = s)}}{n^{-i}_{lt} + \eta^c_t}.   (7)

B. Derivation of the conditional posteriors

a. The derivation of $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c)$ is as follows:

p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c) \propto p(Y^{cj} \mid Y^{-cj}, \beta^c_t, \eta^c_t) \, p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}).   (8)

First, $\beta^c_t$ follows a Dirichlet distribution:

\beta^c_t \mid \alpha_t \sim \mathrm{Dirichlet}(\alpha_t), \quad 0 < \beta^c_{tj} < 1, \quad \sum_{j=1}^T \beta^c_{tj} = 1,   (9)

whose detailed form is

[\beta^c_{t1}, \ldots, \beta^c_{tj}, \ldots, \beta^c_{tT}] \sim \mathrm{Dirichlet}(\lambda_t \alpha_{t1}, \ldots, \lambda_t \alpha_{tj}, \ldots, \lambda_t \alpha_{tT}).   (10)

With the aggregation property we obtain

[\beta^c_{tj}, \; u^c_{tj}, \; \beta^c_{tt} = 1 - \beta^c_{tj} - u^c_{tj}] \sim \mathrm{Dirichlet}(\lambda_t \alpha_{tj}, a_j, \lambda_t \alpha_{tt}),   (11)

where

u^c_{tj} = 1 - \sum_{s=1, s \neq t, s \neq j}^T \beta^c_{ts},   (12)
a_j = \sum_{s=1, s \neq t, s \neq j}^T \lambda_t \alpha_{ts}.   (13)

The joint probability $p(\beta^c_{tj}, u^c_{tj})$ is given as

p(\beta^c_{tj}, u^c_{tj}) = \frac{\Gamma(\sum_{j=1}^T \lambda_t \alpha_{tj})}{\Gamma(\lambda_t \alpha_{tj}) \Gamma(a_j) \Gamma(\lambda_t \alpha_{tt})} (\beta^c_{tj})^{\lambda_t \alpha_{tj} - 1} (u^c_{tj})^{a_j - 1} (1 - \beta^c_{tj} - u^c_{tj})^{\lambda_t \alpha_{tt} - 1}.
(14)

The marginal probability $p(u^c_{tj})$ is computed as

p(u^c_{tj}) = \frac{\Gamma(\sum_{j=1}^T \lambda_t \alpha_{tj})}{\Gamma(a_j) \Gamma(\lambda_t \alpha_{tt} + \lambda_t \alpha_{tj})} (u^c_{tj})^{a_j - 1} (1 - u^c_{tj})^{\lambda_t \alpha_{tt} + \lambda_t \alpha_{tj} - 1}.   (15)

Then the conditional distribution $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)})$ is given as

p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}) \propto \left( \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tj} - 1} \left( 1 - \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tt} - 1}.   (16)

According to Equation (3) in Appendix A, it can easily be obtained that

p(Y^{cj} \mid Y^{-cj}, \beta^c_t, \eta^c_t) = \prod_{l \in c} \left[ \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \frac{\Gamma(n_{ltt} + \eta^c_t \beta^c_{tt})}{\Gamma(\eta^c_t \beta^c_{tt})} \right].   (17)

Finally we obtain

p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c) \propto p(Y^{cj} \mid Y^{-cj}, \beta^c_t, \eta^c_t) \, p(\beta^c_{tj} \mid \beta^c_{t(\sim j)})
\propto \prod_{l \in c} \left[ \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \frac{\Gamma(n_{ltt} + \eta^c_t \beta^c_{tt})}{\Gamma(\eta^c_t \beta^c_{tt})} \right] \times \left( \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tj} - 1} \left( 1 - \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tt} - 1}.   (18)

b. The derivation of $p(\eta^c_t \mid \beta^c_t, Y^c)$ is as follows. The joint posterior distribution $p(\eta^c_t, \beta^c_t \mid Y^c)$ is given as

p(\eta^c_t, \beta^c_t \mid Y^c) \propto \prod_{l \in c} \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_j \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \times \prod_j (\beta^c_{tj})^{\lambda_t \alpha_{tj} - 1} \times \lambda_t e^{-\lambda_t \eta^c_t}.   (19)

Then the conditional posterior distribution $p(\eta^c_t \mid \beta^c_t, Y^c)$ is obtained as

p(\eta^c_t \mid \beta^c_t, Y^c) \propto \prod_{l \in c} \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_j \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \times \lambda_t e^{-\lambda_t \eta^c_t}.   (20)

C. Metropolis-Hastings algorithms simulating β^c_tj and η^c_t

Algorithm 1: Metropolis-Hastings algorithm simulating β^c_tj.
1: Initialize β^{c,0}_tj
2: for i = 1, 2, ... do
3:   Propose: β^{c,cand}_tj ~ U(0, min{2 β^{c,i-1}_tj, 1})
4:   Acceptance probability: α(β^{c,cand}_tj | β^{c,i-1}_tj) = min{1, p(β^{c,cand}_tj | β^c_{t(~j)}, η^c_t, Y^c) / p(β^{c,i-1}_tj | β^c_{t(~j)}, η^c_t, Y^c)}
5:   u ~ U(0, 1)
6:   if u < α then
7:     Accept the proposal: β^{c,i}_tj ← β^{c,cand}_tj
8:   else
9:     Reject the proposal: β^{c,i}_tj ← β^{c,i-1}_tj
10:  end if
11: end for

Algorithm 2: Metropolis-Hastings algorithm simulating η^c_t.
1: Initialize η^{c,0}_t
2: for i = 1, 2, ... do
3:   Propose: η^{c,cand}_t ~ U(0, min{2 η^{c,i-1}_t, 1})
4:   Acceptance probability: α(η^{c,cand}_t | η^{c,i-1}_t) = min{1, p(η^{c,cand}_t | β^c_t, Y^c) / p(η^{c,i-1}_t | β^c_t, Y^c)}
5:   u ~ U(0, 1)
6:   if u < α then
7:     Accept the proposal: η^{c,i}_t ← η^{c,cand}_t
8:   else
9:     Reject the proposal: η^{c,i}_t ← η^{c,i-1}_t
10:  end if
11: end for

References
Chib, S.; and Greenberg, E. 1995. Understanding the Metropolis-Hastings algorithm. The American Statistician, 49(4): 327–335.
Chu, X.; Ouyang, W.; Li, H.; and Wang, X. 2016. CRF-CNN: Modeling structured information in human pose estimation. arXiv preprint arXiv:1611.00468.
Dawid, A. P.; and Skene, A. M. 1979. Maximum likelihood estimation of observer error-rates using the EM algorithm. Journal of the Royal Statistical Society: Series C (Applied Statistics), 28(1): 20–28.
Dumitrache, A.; Aroyo, L.; and Welty, C. 2019. A crowdsourced frame disambiguation corpus with ambiguity. arXiv preprint arXiv:1904.06101.
Finin, T.; Murnane, W.; Karandikar, A.; Keller, N.; Martineau, J.; and Dredze, M. 2010. Annotating named entities in Twitter data with crowdsourcing. In Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk, 80–88.
Griffiths, T. L.; and Steyvers, M. 2004. Finding scientific topics. Proceedings of the National Academy of Sciences, 101(suppl 1): 5228–5235.
Hovy, D.; Berg-Kirkpatrick, T.; Vaswani, A.; and Hovy, E. 2013. Learning whom to trust with MACE.
In Proceedings +of the 2013 Conference of the North American Chapter of the +Association for Computational Linguistics: Human Language +Technologies, 1120–1130. +Huang, Z.; Xu, W.; and Yu, K. 2015. +Bidirectional +LSTM-CRF models for sequence tagging. arXiv preprint +arXiv:1508.01991. +Juang, B. H.; and Rabiner, L. R. 1991. Hidden Markov +models for speech recognition. Technometrics, 33(3): 251– +272. +Lafferty, J.; McCallum, A.; and Pereira, F. C. 2001. Con- +ditional random fields: Probabilistic models for segmenting +and labeling sequence data. +Lakkaraju, H.; Leskovec, J.; Kleinberg, J.; and Mullainathan, +S. 2015. A bayesian framework for modeling human eval- +uations. In Proceedings of the 2015 SIAM International +Conference on Data Mining, 181–189. SIAM. +Li, S.-Y.; Jiang, Y.; Chawla, N. V.; and Zhou, Z.-H. 2018. +Multi-label learning from crowds. IEEE Transactions on +Knowledge and Data Engineering, 31(7): 1369–1382. +Nguyen, A. T.; Wallace, B. C.; Li, J. J.; Nenkova, A.; and +Lease, M. 2017. Aggregating and predicting sequence labels +from crowd annotations. In Proceedings of the conference. +Association for Computational Linguistics. Meeting, volume +2017, 299. NIH Public Access. +Plank, B.; Hovy, D.; and Søgaard, A. 2014a. Learning part- +of-speech taggers with inter-annotator agreement loss. In +Proceedings of the 14th Conference of the European Chapter +of the Association for Computational Linguistics, 742–751. +Plank, B.; Hovy, D.; and Søgaard, A. 2014b. Linguistically +debatable or just plain wrong? In Proceedings of the 52nd +Annual Meeting of the Association for Computational Lin- +guistics (Volume 2: Short Papers), 507–511. +Qiao, M.; Bian, W.; Da Xu, R. Y.; and Tao, D. 2015. Diver- +sified hidden Markov models for sequential labeling. IEEE +Transactions on Knowledge and Data Engineering, 27(11): +2947–2960. +Raykar, V. C.; Yu, S.; Zhao, L. H.; Valadez, G. H.; Florin, +C.; Bogoni, L.; and Moy, L. 2010. Learning from crowds. +Journal of Machine Learning Research, 11(4). + +Rodrigues, F.; Pereira, F.; and Ribeiro, B. 2014. Sequence +labeling with multiple annotators. Machine learning, 95(2): +165–181. +Sabetpour, N.; Kulkarni, A.; Xie, S.; and Li, Q. 2021. Truth +discovery in sequence labels from crowds. In 2021 IEEE +International Conference on Data Mining (ICDM), 539–548. +IEEE. +Sang, E. F.; and De Meulder, F. 2003. Introduction to the +CoNLL-2003 shared task: Language-independent named en- +tity recognition. arXiv preprint cs/0306050. +Sarawagi, S.; and Cohen, W. W. 2004. Semi-markov condi- +tional random fields for information extraction. Advances in +neural information processing systems, 17: 1185–1192. +Simpson, E.; and Gurevych, I. 2018. +A Bayesian ap- +proach for sequence tagging with crowds. arXiv preprint +arXiv:1811.00780. +Snow, R.; O’connor, B.; Jurafsky, D.; and Ng, A. Y. 2008. +Cheap and fast–but is it good? evaluating non-expert anno- +tations for natural language tasks. In Proceedings of the +2008 conference on empirical methods in natural language +processing, 254–263. +Sober´on, G.; Aroyo, L.; Welty, C.; Inel, O.; Lin, H.; and +Overmeen, M. 2013. Measuring crowd truth: Disagreement +metrics combined with worker behavior filters. In CrowdSem +2013 Workshop, volume 2. +Tian, Y.; and Zhu, J. 2012. Learning from crowds in the pres- +ence of schools of thought. In Proceedings of the 18th ACM +SIGKDD international conference on Knowledge discovery +and data mining, 226–234. +Tu, J.; Yu, G.; Domeniconi, C.; Wang, J.; Xiao, G.; and +Guo, M. 2020. 
Multi-label crowd consensus via joint matrix +factorization. Knowledge and Information Systems, 62(4): +1341–1369. +Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones, L.; +Gomez, A. N.; Kaiser, Ł.; and Polosukhin, I. 2017. Attention +is all you need. Advances in neural information processing +systems, 30. +Whitehill, J.; Wu, T.-f.; Bergsma, J.; Movellan, J.; and Ru- +volo, P. 2009. +Whose vote should count more: Optimal +integration of labels from labelers of unknown expertise. Ad- +vances in neural information processing systems, 22: 2035– +2043. +Wu, X.; Fan, W.; and Yu, Y. 2012. Sembler: Ensembling +crowd sequential labeling for improved quality. In Proceed- +ings of the AAAI Conference on Artificial Intelligence, vol- +ume 26. +Yu, G.; Tu, J.; Wang, J.; Domeniconi, C.; and Zhang, X. 2020. +Active multilabel crowd consensus. IEEE Transactions on +Neural Networks and Learning Systems, 32(4): 1448–1459. +Zhang, J.; and Wu, X. 2018. Multi-label inference for crowd- +sourcing. In Proceedings of the 24th ACM SIGKDD Interna- +tional Conference on Knowledge Discovery & Data Mining, +2738–2747. + diff --git a/bNAzT4oBgHgl3EQfnP3n/content/tmp_files/load_file.txt b/bNAzT4oBgHgl3EQfnP3n/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..ac5b717a11ee12a3e795776c268ed45f990bfa8c --- /dev/null +++ b/bNAzT4oBgHgl3EQfnP3n/content/tmp_files/load_file.txt @@ -0,0 +1,1866 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf,len=1865 +page_content='Learning Ambiguity from Crowd Sequential Annotations Xiaolei Lu 1 1 City University of Hong Kong Abstract Most crowdsourcing learning methods treat disagreement be- tween annotators as noisy labelings while inter-disagreement among experts is often a good indicator for the ambiguity and uncertainty that is inherent in natural language.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' In this paper, we propose a framework called Learning Ambiguity from Crowd Sequential Annotations (LA-SCA) to explore the inter-disagreement between reliable annotators and effec- tively preserve confusing label information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' First, a hierarchi- cal Bayesian model is developed to infer ground-truth from crowds and group the annotators with similar reliability to- gether.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' By modeling the relationship between the size of group the annotator involved in, the annotator’s reliability and ele- ment’s unambiguity in each sequence, inter-disagreement be- tween reliable annotators on ambiguous elements is computed to obtain label confusing information that is incorporated to cost-sensitive sequence labeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Experimental results on POS tagging and NER tasks show that our proposed framework achieves competitive performance in inferring ground-truth from crowds and predicting unknown sequences, and inter- preting hierarchical clustering results helps discover labeling patterns of annotators with similar reliability.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Introduction Sequence labeling, which refers to assign sequences of la- bels to observed sequential data, is widely used in Natural Language Processing (NLP) tasks including Part-of-Speech (POS) tagging, Chunking and Named Entity Recognition (NER).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Many downstream NLP applications (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' relation ex- traction and machine translation ) can benefit from sequential label assignments of these fundamental NLP tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Traditional sequence labeling models like Hidden Markov Models (HMMs) and Conditional Random Fields (CRFs) require handcrafted features which need to be carefully de- signed to obtain good results on a specific dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Over the past decade, deep sequence models have resulted in im- proving the performance of sequence labeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' For exam- ple, Bi-LSTM-CRF (Huang, Xu, and Yu 2015) and Trans- former(Vaswani et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' However, these sequence label- ing models require a large amount of training data with exact annotations, which is costly and laborious to produce.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' In recent years, well-developed commercial crowdsourcing platforms (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Amazon Mechanical Turk and CrowdFlower (Finin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2010)) have flourished as effective tools to obtain large labeled datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Crowdsourcing utilizes contribution of the group’s intelligence, but the quality of crowd labels still cannot be guaranteed as the expertise level of annotators varies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Therefore the major focus of learning from crowds is on estimating the reliability of annotators and building prediction models based on the estimated ground-truth labels.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' For example, Snow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' (Snow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2008) used bias correc- tion to combine non-expert annotation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Raykar et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' (Raykar et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2010) proposed to jointly estimate the coefficients of a logistic regression classifier and the annotators’ expertise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Many effective models like HMM-Crowd (Nguyen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2017) and Sembler (Wu, Fan, and Yu 2012) extend crowd- sourcing to sequence labeling, which enables better aggre- gating crowd sequential annotations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' But these approaches measure the quality of crowd labels under the assumption of only one ground-truth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' As a result, the disagreement between annotators has to be considered as noisy labelings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' How- ever, research in NLP field shows that inter-disagreement among experts could be a good indicator for ambiguity and uncertainty that is inherent in language (Plank, Hovy, and Søgaard 2014b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Apparently, there is no clear answer for the linguistically hard cases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' As shown in Figure 1, “like” can be tagged as conjunction or adjective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Furthermore, inter- disagreement between experts could reveal confusing label information that is related to the distribution of hard cases over label pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Figure 2 demonstrates label confusion matrix in POS tagging task, where “ADJ” (adjectives) and “NOUN” (nouns) are more likely to be confused.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Wisely incorporat- ing confusing label information into supervised learning can make the classifier more robust (Plank, Hovy, and Søgaard 2014b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' However, existing crowd sequential models do not take inter-disagreement between annotators into account.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' To explore the inter-disagreement between reliable annota- tors and effectively preserve confusing label information, in this paper, we propose a framework called Learning Ambi- guity from Crowd Sequential Annotations (LA-SCA).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Our contributions can be summarized as follows: First, we develop a hierarchical Bayesian model to group annotators into different clusters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' By imposing an hierarchi- cal prior on the confusion matrix that describes the reliability of annotators in the same cluster, the hierarchical Bayesian model allows the annotators that belong to the same clus- ter to be characterized with different but similar reliability, which aims to preserve inter-disagreement between reliable arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='01579v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='CL] 4 Jan 2023 Figure 1: Two examples of doubly-annotated twitter POS tagging data by different experts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Figure 2: Label confusion matrix derived from two gold annotations of 500 twitters POS tagging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' annotators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Second, a low-rank model is formulated to model the re- lationship between the size of group the annotator involved in, annotator’s reliability and element’s unambiguity in each sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Then inter-disagreement between reliable annota- tors on ambiguous elements can be obtained to compute label confusion matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Third, cost-sensitive mechanism is combined to sequence labeling to encourage two more confusing label sequences that contain the ground-truth incur a lower Hamming loss, which aims to improve the robustness of sequence model.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Related Work Hidden Markov Models (HMMs) (Juang and Rabiner 1991;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Qiao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2015) and Conditional Random Fields (CRFs) (Lafferty, McCallum, and Pereira 2001;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Sarawagi and Cohen 2004) form the most popular generative-discriminative pair for sequence labeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' With the great success of DL models, the combination of deep learning and graphical models re- ceives increasing attention (Chu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' For example, Bi-LSTM-CRF (Huang, Xu, and Yu 2015) is proposed to ef- ficiently encode past and future input features by combining a bidirectional LSTM network and a CRF layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Furthermore, Transformer (Vaswani et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' 2017) is proposed with atten- tion mechanism to learn long-range dependencies, which demonstrates significant improvement in efficiency and per- formance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' However, both traditional and DL models require a large amount of training data with exact annotations, which is financially expensive and prohibitively time-consuming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Incorporating semi-supervised learning to sequence label- ing models (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' semi-supervised CRFs and semi-SVM) can partly lighten the burden of sequential annotations, but this learning mechanism still needs exact labelings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Crowdsourcing provides an effective tool to collect large la- beled dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Existing crowdsourcing learning models can be grouped into two types: wrapper and joint models.' 
The former use the ground truths inferred from the crowd for subsequent classifier learning, while joint models simultaneously estimate annotators' reliability and learn the prediction model. The Dawid & Skene (DS) aggregation model (Dawid and Skene 1979) and its variants (e.g., GLAD (Whitehill et al. 2009)) explore different ways to model the relationship between the ground truth, annotators' reliability, and the corresponding annotations, and then use the Expectation Maximization (EM) approach to estimate the ground-truth labels and annotators' reliability. The Sembler (Wu, Fan, and Yu 2012) and HMM-Crowd (Nguyen et al. 2017) models aggregate multiple annotations to learn a sequence model. Simpson and Gurevych (2018) further took the label dependency in sequential annotation into consideration and used a Bayesian approach to model crowd sequential labels. It should be noted that the above crowdsourcing learning models assume only one ground truth and do not consider the inter-disagreement among annotators. As a result, these models fail to capture the inherent semantic ambiguity in NLP tasks and to preserve confusing label information. To explore the inter-disagreement between annotators, Plank, Hovy, and Søgaard (2014a) derived a label confusion matrix from doubly gold annotations and showed that a POS tagging classifier sensitive to confusing label information is more robust. Soberón et al. (2013) proposed the CrowdTruth methodology to model ambiguity in semantic interpretation, treating all reliable annotators' labelings on ambiguous cases as high-quality annotations.
Dumitrache, Aroyo, and Welty (2019) used ambiguity-aware ground truths to train a classifier for open-domain relation extraction, and the results showed that ambiguity-aware crowds are better than experts regarding the quality and efficiency of annotation. However, CrowdTruth-based models only preserve multiple ground truths for ambiguous instances and ignore the confusing label information that can benefit robust classifier learning. In recent years, multi-label crowdsourcing has been developed to identify multiple true labels from crowds for multi-label tasks. Different from discovering inter-disagreement between annotators in single-label crowdsourcing, most multi-label crowdsourcing methods assume that the multiple ground truths are assigned by one annotator. For example, Zhang and Wu (2018) extended a generative single-label crowdsourcing method by incorporating the correlation among labels, while Li et al. (2018) further utilized each annotator's neighbors' annotations and effort-saving annotating behavior to jointly estimate annotators' expertise and a multi-label classifier. There have also been research works exploring multi-label crowd consensus (Yu et al. 2020; Tu et al. 2020) under the assumption that reliable annotators share the same label correlations, which, however, fails to preserve the inter-disagreement among reliable annotators.
Proposed framework

The proposed framework LA-SCA contains three parts: infer the ground truths and the reliable annotators by hierarchical modeling of the crowds; obtain confusing label information from the inter-disagreement between reliable annotators on ambiguous elements via a low-rank model; and incorporate the label confusion information into cost-sensitive sequence labeling. The details are described as follows.

Hierarchical Modeling for Crowd Annotations

Let $Y = \{y_{i1}, y_{i2}, \ldots, y_{iL}\}_{i=1}^{N}$ denote the crowd annotations provided by $L$ annotators over $N$ instances. Each annotator $l$ belongs to a cluster $c \in \{1, 2, \ldots, C\}$ and is characterized by a confusion matrix $\Psi^{c_l} \in [0, 1]^{T \times T}$, where $T$ denotes the size of the possible label set for $Y$. We assume that the annotators in the same cluster have similar reliability but that their annotations can differ. For example, annotators with lower reliability will provide various annotations when labeling a specific instance, while reliable annotators may have different opinions on ambiguous instances.
To preserve disagreement between the annotators in the same cluster, we use the following hierarchical prior on each row of the confusion matrix $\Psi^{c_l}$:

$\eta^c_t \sim \mathrm{Exponential}(\lambda_t), \quad \eta^c_t > 0$,  (1)

$\beta^c_t \sim \mathrm{Dirichlet}(\alpha_t), \quad 0 < \beta^c_{tj} < 1, \; \sum_{j=1}^{T} \beta^c_{tj} = 1$,  (2)

$\Psi^{c_l}_t \sim \mathrm{Dirichlet}(\eta^c_t \beta^c_t)$,  (3)

$y_{il} \sim \mathrm{Multinomial}(\Psi^{c_l}_{z_i})$,  (4)

where $c_l$ denotes that annotator $l$ belongs to cluster $c$ and $z_i$ is the ground-truth label of the $i$th instance. $\eta^c_t$ and $\beta^c_t$ can be understood as the precision and the mean of $\Psi^{c_l}_t$, respectively. Besides, the cluster assignment $c_l$ and the ground truth $z_i$ follow multinomial distributions:

$c_l \sim \mathrm{Multinomial}(\nu)$,  (5)

$z_i \sim \mathrm{Multinomial}(\gamma)$,  (6)

where $\nu$ and $\gamma$ are sampled from $\mathrm{Dirichlet}(\epsilon_\nu)$ and $\mathrm{Dirichlet}(\epsilon_\gamma)$, respectively.

We employ collapsed Gibbs sampling (Griffiths and Steyvers 2004; Lakkaraju et al. 2015) to estimate the conditional distributions over the hidden variables $z_i$ and $c_l$ (more computation details can be found in Technical Appendix A). Let $\mathbf{c}$ and $\mathbf{z}$ denote the cluster assignments and the true labels, respectively; $\mathbf{c}^{-l}$ indicates that annotator $l$ is excluded from the cluster assignments and $\mathbf{z}^{-i}$ excludes the $i$th instance. The conditional distribution of the cluster assignment of annotator $l$ given the rest of the variables is computed as

$p(c_l = c \mid \mathbf{c}^{-l}, \mathbf{z}, Y, \eta, \beta) \propto p(c_l = c \mid \mathbf{c}^{-l}) \times p(Y^l \mid Y^{-l}, \mathbf{z}, \mathbf{c}, \eta, \beta) \propto (n^{-l}_c + \epsilon_\nu / C) \times \prod_t \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \prod_s \frac{\Gamma(n_{lts} + \eta^c_t \beta^c_{ts})}{\Gamma(\eta^c_t \beta^c_{ts})}$,  (7)

where $n^{-l}_c$ denotes the number of annotators (excluding $l$) assigned to cluster $c$, $n_{lt}$ is the number of instances that are annotated by $l$ and have true label $t$, and $n_{lts}$ denotes the number of instances that are annotated with label $s$ by $l$ and have true label $t$. Similarly, $p(z_i = t \mid \mathbf{z}^{-i}, Y, \mathbf{c}, \eta, \beta)$ is given as

$p(z_i = t \mid \mathbf{z}^{-i}, Y, \mathbf{c}, \eta, \beta) \propto p(z_i = t \mid \mathbf{z}^{-i}) \times p(y_i \mid Y^{-i}, z_i = t, \mathbf{z}^{-i}, \mathbf{c}, \eta, \beta) \propto (n^{-i}_t + \epsilon_\gamma / T) \times \prod_c \prod_{c_l = c} \frac{\prod_s (n^{-i}_{lts} + \eta^c_t \beta^c_{ts})^{\mathbb{I}(y_{il} = s)}}{n^{-i}_{lt} + \eta^c_t}$,  (8)

where $n^{-i}_t$ denotes the number of instances (excluding $i$) with true label $t$, $n^{-i}_{lt}$ denotes the number of instances (excluding $i$) that are annotated by $l$ and have true label $t$, and $n^{-i}_{lts}$ denotes the number of instances (excluding $i$) that are annotated with label $s$ by $l$ and have true label $t$.
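As a concrete illustration of the collapsed Gibbs update in Eq. (7), the minimal Python sketch below evaluates the unnormalized log-probability of assigning annotator l to each cluster from the count statistics and then resamples the assignment; the array layout, function names, and the count bookkeeping are assumptions made only for this example, not the authors' implementation (the update for z_i in Eq. (8) can be coded analogously).

```python
import numpy as np
from scipy.special import gammaln

def cluster_logprobs(n_c_minus_l, n_lt, n_lts, eta, beta, eps_nu):
    """Unnormalized log p(c_l = c | rest), following Eq. (7).

    n_c_minus_l : (C,)      annotators currently in each cluster, excluding l
    n_lt        : (T,)      items annotated by l whose current true label is t
    n_lts       : (T, T)    items with true label t that l annotated as s
    eta         : (C, T)    precision parameters eta_t^c
    beta        : (C, T, T) mean parameters beta_{ts}^c (each row sums to 1)
    """
    C = len(n_c_minus_l)
    logp = np.empty(C)
    for c in range(C):
        prior = np.log(n_c_minus_l[c] + eps_nu / C)
        eb = eta[c][:, None] * beta[c]                        # eta_t^c * beta_{ts}^c
        lik = np.sum(gammaln(eta[c]) - gammaln(n_lt + eta[c])
                     + np.sum(gammaln(n_lts + eb) - gammaln(eb), axis=1))
        logp[c] = prior + lik
    return logp

def resample_cluster(logp, rng=None):
    """Draw a cluster index from the normalized probabilities."""
    rng = np.random.default_rng() if rng is None else rng
    p = np.exp(logp - logp.max())
    return int(rng.choice(len(p), p=p / p.sum()))
```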
Due to the non-conjugacy of the Exponential and Dirichlet priors with respect to the likelihood function $p(Y \mid \mathbf{z}, \mathbf{c}, \eta, \beta)$, we use the Metropolis-Hastings (MH) algorithm (Chib and Greenberg 1995) to estimate the conditional posterior distributions $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c)$ and $p(\eta^c_t \mid \beta^c_t, Y^c)$ for each cluster; a symmetric proposal distribution (i.e., a uniform distribution) is selected to simulate a candidate sample (algorithm details are presented in Technical Appendix C, Algorithms 1 and 2). $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c)$ is given as

$p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c) \propto p(Y^{cj} \mid Y^{-cj}, \beta^c_t, \eta^c_t) \times p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}) \propto \prod_{c_l = c} \left[ \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \, \frac{\Gamma(n_{ltt} + \eta^c_t \beta^c_{tt})}{\Gamma(\eta^c_t \beta^c_{tt})} \right] \times \left( \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tj} - 1} \left( 1 - \frac{\beta^c_{tj}}{1 - u^c_{tj}} \right)^{\lambda_t \alpha_{tt} - 1}$,  (9)

where $\beta^c_t = [\beta^c_{t1}, \ldots, \beta^c_{tj}, \ldots, \beta^c_{tT}]$ and $u^c_{tj} = 1 - \sum_{s=1, s \neq t, s \neq j}^{T} \beta^c_{ts}$. The conditional posterior distribution of $\beta^c_{tt}$ is obtained via $\beta^c_{tt} = 1 - \sum_{j=1, j \neq t}^{T} \beta^c_{tj}$. Derivation details of $p(\beta^c_{tj} \mid \beta^c_{t(\sim j)}, \eta^c_t, Y^c)$ can be found in Technical Appendix B. $p(\eta^c_t \mid \beta^c_t, Y^c)$ is defined as

$p(\eta^c_t \mid \beta^c_t, Y^c) \propto \prod_{c_l = c} \frac{\Gamma(\eta^c_t)}{\Gamma(n_{lt} + \eta^c_t)} \times \prod_j \frac{\Gamma(n_{ltj} + \eta^c_t \beta^c_{tj})}{\Gamma(\eta^c_t \beta^c_{tj})} \times \lambda_t e^{-\lambda_t \eta^c_t}$  (10)

(Sabetpour et al. 2021).
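To make the sampling step concrete, the following is a minimal sketch of one Metropolis-Hastings update with a symmetric uniform proposal, as described above; the proposal half-width and the shape of the log-posterior callable are illustrative assumptions, and Eqs. (9)-(10) would supply the actual log densities.

```python
import numpy as np

def mh_step(current, log_post, half_width=0.05, rng=None):
    """One Metropolis-Hastings update with a symmetric uniform proposal.

    current    : current scalar value, e.g. beta_{tj}^c or eta_t^c
    log_post   : callable giving the unnormalized log conditional posterior
                 (Eq. (9) for beta_{tj}^c, Eq. (10) for eta_t^c)
    half_width : half-width of the uniform proposal window (illustrative)
    """
    rng = np.random.default_rng() if rng is None else rng
    candidate = current + rng.uniform(-half_width, half_width)
    if candidate <= 0.0:            # both parameters must stay positive
        return current
    # symmetric proposal => the acceptance ratio reduces to a posterior ratio
    if np.log(rng.uniform()) < log_post(candidate) - log_post(current):
        return candidate
    return current
```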
By iteratively estimating $z_i$, $c_l$, $\beta^c_t$ and $\eta^c_t$ until convergence, annotators that have similar reliability can be grouped into the same cluster. Since the inter-disagreement among experts can reveal linguistically ambiguous cases, to identify the cluster with reliable annotators we compute the shared confusion matrix of each cluster based on the estimated ground truths $\mathbf{z}$. Using one ground truth per instance does not affect the estimation of the shared confusion matrix, as ambiguous cases only make up a very small part of the whole dataset (Plank, Hovy, and Søgaard 2014a). The entry of the shared confusion matrix $M^c \in \mathbb{R}^{T \times T}$ for cluster $c$ is defined as

$M^c_{t,s} = \frac{\sum_i \mathbb{I}(z_i = t) \sum_{l \in c} \mathbb{I}(y_{il} = s)}{\sum_i \mathbb{I}(z_i = t)}$,  (11)

and the high-reliability cluster is obtained with $\arg\max_c \sum_{t=1}^{T} M^c_{t,t} / T$.
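As an illustration of Eq. (11) and the cluster-selection rule, the sketch below computes a cluster's shared confusion matrix from the estimated ground truths and picks the cluster with the largest mean diagonal; the annotation-matrix layout (one column per annotator, with -1 marking skipped items) is an assumption made only for this example.

```python
import numpy as np

def shared_confusion_matrix(z, Y, members, T):
    """Eq. (11): shared confusion matrix M^c of one cluster.

    z       : (N,)    estimated ground-truth labels
    Y       : (N, L)  crowd annotations, one column per annotator (-1 = skipped)
    members : indices of the annotators belonging to the cluster
    """
    M = np.zeros((T, T))
    for t in range(T):
        rows = (z == t)
        denom = rows.sum()
        if denom == 0:
            continue
        sub = Y[rows][:, members]
        for s in range(T):
            M[t, s] = (sub == s).sum() / denom
    return M

def high_reliability_cluster(z, Y, clusters, T):
    """Pick the cluster whose shared confusion matrix has the largest mean diagonal."""
    scores = [np.trace(shared_confusion_matrix(z, Y, m, T)) / T for m in clusters]
    return int(np.argmax(scores))
```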
Identifying Ambiguity via Low Rank Model

Based on the identified reliable annotators, to estimate the ambiguity degree of each element in a sequence, we assume that in the high-reliability cluster the annotators' decisions form small groups for ambiguous elements, whereas annotators who are more reliable in labeling a sequence are consistent with the other annotators for unambiguous elements. Inspired by the quantitative formula used to describe the relationship between group size, annotator reliability and task clarity (Tian and Zhu 2012), we construct an $L \times N^s$ matrix $A$ for the $s$th sequence, where $N^s$ is the length of the sequence. In this matrix, each entry $A(l, N^s_j)$ denotes the size of the group that annotator $l$ joins when labeling the $j$th element of the $s$th sequence. We define $A(l, N^s_j)$ as

$A(l, N^s_j) = \omega^s_l \times \mu^s_j$,  (12)

where $\omega^s_l$ represents the reliability of annotator $l$ in labeling the $s$th sequence and $\mu^s_j$ is the degree of unambiguity of the $j$th element. Intuitively, if the annotator is more reliable or the element is less ambiguous, the group size is larger. Thus we employ a rank-1 factorization to formulate the relationship between $A(l, N^s_j)$, $\omega^s_l$ and $\mu^s_j$. The degree of unambiguity of each element in the $s$th sequence is computed as follows:

$A^s = U \Lambda V^{T}$,  (13)

$\omega^s = U_{\cdot 1} \sqrt{\Lambda_{11}}$,  (14)

$\mu^s = V_{\cdot 1} \sqrt{\Lambda_{11}}$,  (15)

where $\omega^s = [\omega^s_1, \omega^s_2, \ldots, \omega^s_L]$ and $\mu^s = [\mu_1, \mu_2, \ldots, \mu_{N^s}]$.
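A minimal sketch of the rank-1 factorization in Eqs. (13)-(15), using the leading singular triplet of the group-size matrix, is given below; fixing the sign ambiguity of the singular vectors by taking absolute values is an assumption not spelled out in the text, and the second helper anticipates the scoring rule of Eq. (16) used in step b below.

```python
import numpy as np

def reliability_and_unambiguity(A):
    """Eqs. (13)-(15): rank-1 factorization of the L x N_s group-size matrix A^s."""
    U, S, Vt = np.linalg.svd(A, full_matrices=False)
    # the leading singular vectors are sign-ambiguous; taking absolute values
    # keeps reliabilities and unambiguity degrees non-negative (an assumption)
    omega = np.abs(U[:, 0]) * np.sqrt(S[0])   # omega^s: per-annotator reliability
    mu = np.abs(Vt[0, :]) * np.sqrt(S[0])     # mu^s: per-element degree of unambiguity
    return omega, mu

def label_scores(labels_j, omega):
    """Eq. (16), see step b below: score every label proposed for element j.

    labels_j : (L,) labels given by the reliable annotators for the j-th element
    omega    : (L,) annotator reliabilities omega^s for this sequence
    """
    labels_j = np.asarray(labels_j)
    # for each candidate label, average the reliabilities of its supporters
    return {int(lab): float(omega[labels_j == lab].mean()) for lab in np.unique(labels_j)}
```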
There are three steps in identifying ambiguity:

a. Identify ambiguous elements. We rank the estimated degrees of unambiguity over the whole sequential dataset and choose an appropriate percentage $p$, identifying the elements that fall within the bottom-$p$ range as ambiguous cases.

b. Compute the inter-disagreement between annotators. For the identified ambiguous cases, the disagreement among reliable annotators provides multiple possible ground truths. Let $\{y_{jt}\}_{t=1}^{L'}$ ($L' \leq L$) denote the set of labels assigned by the annotators to the $j$th ambiguous element of the $s$th sequence; the score of $y_{jt}$ can be defined as

$S(y_{jt}) = \frac{\sum_{l=1}^{L} \mathbb{I}(y_{jl} = y_{jt}) \, \omega^s_l}{\sum_{l=1}^{L} \mathbb{I}(y_{jl} = y_{jt})}$.  (16)

In practice, ambiguous instances have limited gold annotations (Plank, Hovy, and Søgaard 2014b). We select the top two labels for each ambiguous element by $S(y_{jt})$ in descending order and combine them with the ground truth inferred in the hierarchical modeling.

c. Obtain confusing label information. A label confusion matrix $CF \in \mathbb{R}^{T \times T}$ is utilized to show the degree of confusion between label pairs, and the entry $CF(i, j)$ is defined as the mean of $p(z(x) = i, z(x) = j)$ and $p(z(x) = j, z(x) = i)$, where $p(z(x) = i, z(x) = j)$ is computed as

$p(z(x) = i, z(x) = j) = \frac{\sum_k \mathbb{I}(z(x_k) = i, z(x_k) = j)}{\sum_k \mathbb{I}(z(x_k) = i)}$,  (17)

where $k$ denotes the $k$th element in the whole sequential dataset, and $p(z(x) = j, z(x) = i)$ is computed in a similar way.

Cost-sensitive sequence labeling

We are given the sequential dataset $\{x_i, z_i\}_{i=1}^{N}$, where $z_i$ are the ground truths inferred via the hierarchical Bayesian modeling. The traditional training criterion is to maximize the likelihood of a conditional log-linear model, which does not distinguish the ground truth from the incorrect outputs, all of which are penalized equally through normalization. To improve sequence labeling, we employ a cost-sensitive mechanism to incorporate the confusing label information into training, where a label sequence that is more confusable with the ground truth incurs a lower cost. The objective of cost-sensitive sequence labeling is defined as

$L_{CS}(\theta) = \sum_{i=1}^{N} \log \frac{\exp\{\theta^{T} f(x_i, z_i)\}}{\sum_{z_j} \mathrm{cost}(z_j, z_i) \exp\{\theta^{T} f(x_i, z_j)\}}$,  (18)

where $f(x_i, z_i)$ denotes the feature function and $\mathrm{cost}(z_j, z_i)$ measures the influence of the confusing label information on the loss. A weighted Hamming loss is defined to describe $\mathrm{cost}(z_j, z_i)$ as

$\mathrm{cost}(z_j, z_i) = \frac{1}{K_i} \sum_{k=1}^{K_i} (1 - p(z_{jk}, z_{ik})) \ast (z_{jk} \oplus z_{ik})$,  (19)

where $K_i$ is the number of tokens in the $i$th sequence, $\oplus$ is the Boolean XOR operator, and $p(z_{jk}, z_{ik})$ is obtained from the label confusion matrix.
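The sketch below illustrates Eq. (17) and the weighted Hamming cost of Eq. (19). Representing each element by a pair of retained labels (the inferred ground truth plus the confusing label, or two identical entries when unambiguous) is an assumption made here for the example; the cost function itself follows the equation directly.

```python
import numpy as np

def confusion_matrix_cf(pairs, T):
    """Eq. (17): symmetric label confusion matrix CF.

    pairs : (K, 2) array with the two labels retained for every element
            (identical entries for unambiguous elements)
    """
    P = np.zeros((T, T))
    for i in range(T):
        denom = (pairs[:, 0] == i).sum()
        if denom == 0:
            continue
        for j in range(T):
            P[i, j] = ((pairs[:, 0] == i) & (pairs[:, 1] == j)).sum() / denom
    return (P + P.T) / 2.0           # CF(i, j) = mean of both directions

def weighted_hamming_cost(z_j, z_i, CF):
    """Eq. (19): cost of label sequence z_j relative to the ground truth z_i."""
    z_j, z_i = np.asarray(z_j), np.asarray(z_i)
    mismatch = (z_j != z_i).astype(float)        # the XOR term
    return float(np.mean((1.0 - CF[z_j, z_i]) * mismatch))
```

A sequence more confusable with the ground truth (large CF entries at its mismatched positions) therefore receives a smaller cost, and so is penalized less in the normalizer of Eq. (18).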
Experiments

We conduct experiments on POS tagging and NER for English. POS analysis is widely debated, with many hard cases that annotators disagree on (Plank, Hovy, and Søgaard 2014b), while in NER the definition and partition of named entities still remain arguable. In the following sections, we present quantitative results to investigate the effectiveness of our framework in inferring the ground truths, predicting unknown sequences, and preserving confusing label information.

Datasets

Currently published datasets cannot provide both crowd annotations and multiple gold annotations. We employ the following multiply gold-annotated and crowd-annotated datasets.

POS tagging: Most POS tagging datasets contain only one gold annotation, which fails to identify hard cases. Therefore we use the three Twitter POS tagging datasets from the work studying a cost-sensitive POS tagger (Dumitrache, Aroyo, and Welty 2019), which include 500 tweets with doubly gold annotations (denoted as T-DGA for simplicity), the RITTER-TEST dataset (118 tweets), and the INHOUSE dataset (200 tweets). We employ T-DGA as training data (the doubly gold annotations guarantee the existence of hard cases), and RITTER-TEST and INHOUSE as test datasets [1].

NER: The CoNLL-2003 shared NER dataset (Sang and De Meulder 2003) is one of the most common benchmarks used in the NLP community for sequence labeling; it contains four types of entities: persons (PER), locations (LOC), organizations (ORG) and miscellaneous (MISC). Rodrigues, Pereira, and Ribeiro (2014) put 400 articles from CoNLL-2003 on Amazon's Mechanical Turk to collect crowd annotations. There are 47 annotators in total, and the average number of annotators per article is 4.9. In this paper, after pre-processing these crowd-labeled data, we select 3000 sentence-level sequences and use the CoNLL 2003 test data [2].

[1] Both RITTER-TEST and INHOUSE have only one gold annotation.
[2] The CoNLL 2003 test set has only one gold annotation.

Baselines

We use the following six models that learn from crowd sequential data as baselines.

MVtoken (Sang and De Meulder 2003): The ground-truth label sequence is obtained by choosing the label with the most votes at the token level.
DS (Dawid and Skene 1979): The EM algorithm is employed to assign a weight to each vote at the token level.

MACE (Hovy et al. 2013): By including a binary latent variable that denotes if and when each annotator is spamming, the model can identify which annotators are trustworthy and produce the true label.

Sembler (Wu, Fan, and Yu 2012): The model extends crowdsourcing learning from the instance level to the sequence level and jointly estimates annotators' reliability and the sequence model.

HMM-Crowd (Nguyen et al. 2017): Based on HMMs, the model further models the "crowd component" by including parameters for the label quality of annotators and crowd variables.

HC-CLL: To verify the effectiveness of cost-sensitive sequence labeling, we also train the sequence prediction model by maximizing the conditional log-likelihood.

Experimental setting

Synthetic crowd annotations: As T-DGA does not have real crowd annotations, we simulate annotators with different reliability by controlling the precision of their annotations. Since in practice the number of annotators is limited, we set the total number of annotators to 15 and arrange three different assignments: [5, 5, 5], [8, 4, 3] and [3, 4, 8]. In each assignment, three different ranges of precision, [0.9, 0.7], [0.7, 0.4] and [0.4, 0.1], are set to indicate reliability from high to low levels.
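A minimal sketch of how such synthetic annotators could be generated is shown below; it assumes that an annotator with precision q keeps the gold tag with probability q and otherwise picks a uniformly random different tag, since the exact corruption scheme is not specified in the text.

```python
import numpy as np

def simulate_crowd(gold, group_sizes, precision_ranges, T, seed=0):
    """Simulate crowd annotations with controlled precision (illustrative only).

    gold             : (N,) gold tag indices in {0, ..., T-1}
    group_sizes      : e.g. [8, 4, 3]
    precision_ranges : e.g. [(0.9, 0.7), (0.7, 0.4), (0.4, 0.1)]
    """
    rng = np.random.default_rng(seed)
    gold = np.asarray(gold)
    columns = []
    for size, (hi, lo) in zip(group_sizes, precision_ranges):
        for _ in range(size):
            q = rng.uniform(lo, hi)                       # this annotator's precision
            keep = rng.random(gold.shape[0]) < q          # keep the gold tag w.p. q
            wrong = (gold + rng.integers(1, T, gold.shape[0])) % T  # any other tag
            columns.append(np.where(keep, gold, wrong))
    return np.stack(columns, axis=1)                      # (N, 15) for the settings above

# e.g. Y = simulate_crowd(gold_tags, [8, 4, 3], [(0.9, 0.7), (0.7, 0.4), (0.4, 0.1)], T=12)
```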
LA-SCA framework: The optimal number of clusters for annotators is selected from the range [2, 5] based on the Bayesian information criterion. $\lambda_t$ is set to 2. To reflect the assumption that crowd annotations are better than random labeling, the diagonal element of $\alpha_t$ is set to 0.7 while the off-diagonal elements are set to 0.3. Furthermore, we select $p = 10\%$ to identify ambiguous elements.

Experimental results

Comparing with baselines

We evaluate the effectiveness of the proposed framework in inferring the ground truths for the training data and in predicting the test set.

POS tagging task: For simplicity, we denote the three crowd-annotation settings [8, 4, 3], [5, 5, 5] and [3, 4, 8] as ca1, ca2 and ca3, respectively. Table 1 shows the accuracy of inferring the ground truths on the T-DGA dataset (HC-CLL is the same as LA-SCA in inferring ground truths). We can see that most crowd models achieve better performance as the proportion of high-quality annotations increases. The performance of each comparing model varies between Gold 1 and Gold 2, as these two gold annotations have different label assignments for some tokens. In the case of low-quality annotations (i.e., ca3), the developed hierarchical Bayesian model effectively identifies the annotators with high reliability, which helps guide the estimation of the ground truths and thus improves the performance.
DS and HMM-Crowd achieve competitive results, as the mechanism of iteratively estimating annotators' reliability and the ground truths alleviates the negative effect of low-quality annotations.

Table 1: Accuracy of inferring ground truths for the T-DGA dataset (%).

                   Gold 1                    Gold 2
Model          ca1     ca2     ca3       ca1     ca2     ca3
MVtoken        91.82   90.84   83.70     81.94   81.81   73.20
DS             93.34   90.97   91.99     83.53   81.90   82.14
MACE           89.80   84.79   84.56     80.76   76.26   75.89
Sembler        93.05   89.58   85.78     83.36   80.22   76.99
HMM-Crowd      93.40   91.59   90.38     83.72   82.06   81.12
LA-SCA         93.30   92.59   92.22     83.47   81.73   83.71

Table 2 reports the F1 scores of the comparing methods on the RITTER-TEST and INHOUSE datasets. In general, a model that learns from higher-quality ground truths achieves better prediction performance. For the wrapper models that feed the inferred ground truths to the sequence model (i.e., MVtoken, DS and MACE), the prediction performance heavily depends on the quality of the estimated ground truths. Therefore, in the ca3 setting,
the F1 scores of the wrapper models (i.e., MVtoken, DS and MACE) are lower than those of the joint models (i.e., Sembler and HMM-Crowd). The developed hierarchical Bayesian model HC-CLL effectively identifies the cluster with high reliability, which enables stable performance in handling low-quality annotations. Compared with HC-CLL, LA-SCA achieves better results in the ca1 and ca2 settings, because low-quality crowd annotations (i.e., ca3) fail to provide effective confusing label information and are more likely to add noise in cost-sensitive sequence labeling, which then degrades the prediction performance.

Table 2: Performance of the models on the RITTER-TEST and INHOUSE datasets (%).

                   RITTER-TEST               INHOUSE
Model          ca1     ca2     ca3       ca1     ca2     ca3
MVtoken        59.35   58.72   58.72     53.15   52.29   48.03
DS             67.58   60.43   58.69     54.33   48.49   48.14
MACE           59.89   58.04   60.05     47.92   53.33   49.12
Sembler        59.82   61.74   60.53     49.65   50.22   49.93
HMM-Crowd      61.10   61.30   61.79     49.98   49.12   52.40
HC-CLL         61.18   65.12   62.57     52.97   54.55   52.88
LA-SCA         66.20   67.32   61.57     55.65   57.25   50.84

NER task: In the NER tagging task the class "O" accounts for a great proportion of the total classes, so we use the F1 score instead of accuracy to report the performance of inferring ground truths for the training data of the CoNLL 2003 NER task. As shown in Table 3, the developed hierarchical Bayesian model achieves the best F1 score, and the DS model also achieves a competitive result. Table 3 also shows the performance of predicting labels for the test data. Due to the limited crowd-labeled training data, the overall performance of the comparing methods is well below the previously reported results (Rodrigues, Pereira, and Ribeiro 2014). The proposed framework LA-SCA still outperforms the baselines, but only by a narrow margin. Since the cost-sensitive learning mechanism inevitably produces label noise in the NER task, where only a few confusing labels should be attended to each other, directly maximizing the log-likelihood can be competitive with cost-sensitive maximization.

Table 3: Evaluation on the CoNLL 2003 NER task (%).

Model          Infer ground truths    Prediction
MVtoken        63.17                  38.52
DS             65.32                  39.21
MACE           60.07                  37.10
Sembler        63.25                  38.87
HMM-Crowd      63.44                  39.31
HC-CLL         67.54                  40.56
LA-SCA         67.54                  41.56
Identifying ambiguous cases

In this section, we investigate the performance of LA-SCA in identifying ambiguous cases and preserving confusing label information. We present the results on the T-DGA dataset (ca1 setting), as it provides the standard for comparison.

First, we measure the performance of identifying ambiguous cases with the following measures:

$\mathrm{acc}_1 = \frac{\#\text{correctly identified ambiguous cases}}{\#\text{all ambiguous cases}}$,  (20)

$\mathrm{acc}_2 = \frac{\#\text{correctly double-annotated ambiguous cases}}{\#\text{all ambiguous cases}}$,  (21)

and we obtain $\mathrm{acc}_1 = 725/931 = 0.779$ and $\mathrm{acc}_2 = 614/931 = 0.660$. It can be concluded that LA-SCA successfully identifies most of the ambiguous cases in T-DGA. We further present two examples from T-DGA with gold and derived labelings on ambiguous cases; as demonstrated in Figure 3, LA-SCA successfully identifies ambiguous cases with the confusing label pairs ["ADJ", "NOUN"] and ["DET", "ADV"].

Figure 3: Two examples from T-DGA with gold and derived labelings on ambiguous cases.
Figure 3: Two examples from T-DGA with gold and derived labelings on ambiguous cases.
[Figure 3 content: the two example tweets, "How to make Daily is out" and "Why barefoot isn't best for most runners", each shown with its gold and its derived POS labeling.]

(a) Gold matrix. (b) Derived matrix (ca1). (c) Derived matrix (ca2). (d) Derived matrix (ca3).
Figure 4: Comparison between the gold and the derived label confusion matrices on T-DGA.
[Figure 4 content: four confusion-matrix heatmaps over the tag set ADJ, ADP, ADV, CONJ, DET, FLAG, NOUN, NUM, PRON, PRT, VERB, X.]

Figure 4 shows the gold label confusion matrix and the derived confusion matrices of the three settings, respectively.
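For readers who want to reproduce a figure in the style of Figure 4, the sketch below tabulates a row-normalized label confusion matrix over the T-DGA tag set from aligned (gold, derived) tag pairs; the input pairs and the normalization scheme are assumptions made here for illustration, not the exact procedure behind the published matrices.

# Illustrative sketch: row-normalized label confusion matrix over the T-DGA tag
# set from aligned (gold_tag, derived_tag) pairs. The pairs and normalization
# are assumptions for illustration, not the paper's exact procedure.
import numpy as np

TAGS = ["ADJ", "ADP", "ADV", "CONJ", "DET", "FLAG",
        "NOUN", "NUM", "PRON", "PRT", "VERB", "X"]
IDX = {t: i for i, t in enumerate(TAGS)}

def confusion_matrix(pairs):
    """pairs: iterable of (gold_tag, derived_tag) for aligned tokens."""
    mat = np.zeros((len(TAGS), len(TAGS)), dtype=float)
    for gold, derived in pairs:
        mat[IDX[gold], IDX[derived]] += 1.0
    row_sums = mat.sum(axis=1, keepdims=True)
    return np.divide(mat, row_sums, out=np.zeros_like(mat), where=row_sums > 0)

# Hypothetical aligned tags; a confusable ADJ/NOUN pair shows up off-diagonal.
pairs = [("ADJ", "ADJ"), ("ADJ", "NOUN"), ("NOUN", "NOUN"),
         ("DET", "DET"), ("DET", "ADV")]
cm = confusion_matrix(pairs)
print(cm[IDX["ADJ"], IDX["NOUN"]])  # 0.5 -- half of the gold ADJ tokens derived as NOUN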
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='69 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='052 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='033 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='078 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='7 ADJ - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='53 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='039 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='033 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0290.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='083 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='09 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='038 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='023 ADP -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='61 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='053 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='049 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content="04 6E0'0 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='6 ADV-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='052 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='039 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 60°0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='044 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='028 CONJ -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='033 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='053 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 EEO 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='039 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='096 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='044 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0350.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='022 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='5 DET -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='033 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='57 LEOO 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='071 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='053 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='4 FLAG -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='078 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='029 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0099 ( 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content="17 EEO'O 3 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='019 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='041 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='035 NOUN-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='083 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='049 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='09 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='096 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='071 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='17 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='74 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='083 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='14 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='055 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='19 EO- NUM - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 60°0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content="04 EEO'O 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='11 EO 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='045 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 PRON-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content="017 6E0'0 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='044 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='053 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='083 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='43 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='021 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 PRT - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='062 0 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='019 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='14 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='021 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='038 VERB - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='038 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='044 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='041 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='055 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='045 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='68 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='051 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='1 X- 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='023 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='022 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='19 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='038 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='051 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='13 ADJ ADP ADV CONJ DET FLAG NOUN NUM F PRON PRT VERB X(a) Cluster 1 (b) Cluster 2 (c) Cluster 3 Figure 5: Shared confusion matrices of three clusters on POS tagging task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' crowds contain noisy label information, the agreement for some labels is lower than the gold one, which may gen- erate wrong confusing label information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' For example, in the twitter “FollowerSale is most Trusted company to buy”, the gold annotation of “to” is [“VERB”] while the derived confusing label set is [“VERB”, “PRON”].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' But the derived matrices also preserve some confusing label information that is similar to the gold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' For example, the agreement between adjectives [“ADJ”] are nouns [“NOUN”], and [“X”] category is more likely to be confused with punctuations [“.”] and nouns [“NOUN”].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content=' Interpreting clusters Clustering for crowd annotations can help discover common patterns of the annotators with similar reliability.' 
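To make the notion of a derived confusing label set concrete, the following is a minimal sketch of one plausible way to read such a set off a row-normalized confusion-matrix row by thresholding agreement; the tag list, the row values, and the threshold are illustrative assumptions, not the paper's actual procedure or data.

import numpy as np

# Illustrative tag set and one row-normalized confusion-matrix row for the
# gold tag "VERB" (made-up numbers, not the paper's estimates).
TAGS = ["ADJ", "ADP", "ADV", "CONJ", "DET", "FLAG",
        "NOUN", "NUM", "PRON", "PRT", "VERB", "X"]
verb_row = np.array([0.02, 0.03, 0.02, 0.01, 0.02, 0.01,
                     0.05, 0.01, 0.12, 0.03, 0.65, 0.03])

def confusing_label_set(row, tags, gold_tag, threshold=0.10):
    # The gold tag plus every other tag whose agreement reaches the threshold.
    labels = {gold_tag}
    for tag, p in zip(tags, row):
        if tag != gold_tag and p >= threshold:
            labels.add(tag)
    return [t for t in tags if t in labels]  # deterministic display order

print(confusing_label_set(verb_row, TAGS, "VERB"))  # -> ['PRON', 'VERB']

Under these made-up numbers, PRON is the only competing tag above the threshold, mirroring the ["VERB", "PRON"] example above.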
Interpreting clusters

Clustering of crowd annotations can help discover common patterns among annotators with similar reliability. Here we demonstrate how to interpret the shared confusion matrices of the estimated clusters on the POS tagging and NER tasks.

POS tagging task: We choose the ca1 setting and present the shared confusion matrices of three clusters. By reviewing the diagonal elements of the three shared confusion matrices in Figure 5, we can see that the developed Bayesian hierarchical model separates annotators with different reliability well. Cluster 1 contains the annotators with high reliability, where the average successful-identification value is above 0.7, while the annotators with lower reliability are grouped into the third cluster, where the average successful-identification value is below 0.3.

NER task: We present clustering results on crowd annotations collected from AMT. Generally, these crowd annotations are of good quality. It can be seen from Figure 6 that both clusters are reliable in assigning "I-PER", "O", "I-LOC" and "B-ORG". Cluster 1 shows the more reliable annotations, with an average diagonal value of 0.742, versus 0.535 for cluster 2.

[Figure 6: Shared confusion matrices of clusters on the NER task; (a) Cluster 1, (b) Cluster 2. Heatmap cell values omitted.]
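These cluster-level reliability readings amount to averaging the diagonal of each cluster's shared confusion matrix. A minimal sketch of that computation, assuming the row-normalized matrices are already available as NumPy arrays; the two small matrices below are made-up placeholders, not the estimated matrices from the paper.

import numpy as np

def average_diagonal(conf_mat):
    # Mean of the diagonal of a row-normalized confusion matrix: the average
    # probability that a cluster's annotators assign the gold label.
    return float(np.mean(np.diag(np.asarray(conf_mat, dtype=float))))

# Two toy 3x3 shared confusion matrices standing in for two clusters.
clusters = {
    "cluster_1": np.array([[0.80, 0.15, 0.05],
                           [0.10, 0.75, 0.15],
                           [0.05, 0.20, 0.75]]),
    "cluster_2": np.array([[0.50, 0.30, 0.20],
                           [0.25, 0.55, 0.20],
                           [0.30, 0.20, 0.50]]),
}

for name, mat in clusters.items():
    print(name, round(average_diagonal(mat), 3))
# cluster_1 0.767  (more reliable)
# cluster_2 0.517  (less reliable)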
Discussion

The main concern regarding the evaluation of LA-SCA is that it should be performed on datasets that provide both crowd annotations and multiple gold annotations. For T-DGA, we have to simulate crowd annotations and use precision to indicate an annotator's reliability from a global view. A situation can then arise in which simple cases are more likely to be incorrectly annotated at precision levels in [0.9, 0.7], which is not in line with the decisions of reliable annotators. In the crowd-annotated CoNLL-2003 NER task, the number of annotators assigned per article is limited and the crowd annotations are of good quality, which to some extent hinders the exploration of labeling diversity.

While the incomplete datasets partially limit the applicability of LA-SCA, the proposed hierarchical Bayesian modeling shows its competitiveness in inferring ground truths from real crowd annotations and from synthetic crowds with low reliability. As the cost-sensitive mechanism expects a sparse label confusion matrix in tasks (e.g. NER) where only a few labels are confused with each other, it remains to be explored how to achieve significant improvement in predicting unknown sequences.

Conclusion

In this paper, we propose a framework called Learning Ambiguity from Crowd Sequential Annotations (LA-SCA) to explore the inter-disagreement between reliable annotators and to effectively preserve confusing-label information for learning a more robust sequence classifier. Experimental results show that LA-SCA achieves competitive performance in inferring ground truths from crowds and in predicting the test set. Further, the identified clusters can help interpret the labeling patterns of annotators with similar reliability, which can help task designers improve their labeling guidelines.
[Shared confusion-matrix heatmaps over the POS tag set {ADJ, ADP, ADV, CONJ, DET, FLAG, NOUN, NUM, PRON, PRT, VERB, X}; cell values omitted.]
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='14 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='082 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='013 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='024 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='28 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='4 NUM - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 0 600°0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='022 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='049 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content="013 EEO E90'0 0 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='013 PRON 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content="058 EEO'O 0 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0042 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='029 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='11 EEO 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0042 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 PRT - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='068 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='068 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='068 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='068 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='34 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 VERB 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='072 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='15 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='092 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0023 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='074 X- 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='17 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='016 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='05 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00310.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0 ADJ ADP ADV CONJ DET FLAG NOUN NUM PRON PRT VERB x5sIw-1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='64 0 EO 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='043 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='021 0 0 I-PER 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='98 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='016 0 0 0 0 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0081 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='98 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='001 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0071 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00052 B-PER 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='21 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='76 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='018 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='6 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='011 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='18 0 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='67 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='039 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0056 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='079 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content="011 I-LOC EO'O 0 0." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='061 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='18 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='7 0 EOO 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='024 0 8E0 0 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='48 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 B-LOC 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0072 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='072 0 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='84 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='062 B-ORG 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='011 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='21 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='011 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='015 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='019 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='11 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='61 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0 I-MiSC I-PER 0 B-PER I-ORG I-LOC B-MISC B-LOC B-ORGI-MISC 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='56 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='41 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 0 I-PER 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0076 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='88 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00085 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='01 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00085 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='8 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='013 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0014 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='98 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00018 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0013 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0013 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0006 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00027 B-PER 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='43 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0013 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0078 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00065 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='6 1-ORG 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='047 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='5 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='13 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='13 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='073 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0073 I-LOC 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='064 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='18 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0046 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='71 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='041 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0046 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='4 B-MISC 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='018 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0036 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='67 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0055 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0018 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='13 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='095 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='066 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 B-LOC 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0014 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='072 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0057 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='92 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0022 B-ORG 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0047 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0029 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='52 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0017 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0023 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='34 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='082 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0 I-MiSC I-PER 0 B-PER I-ORG I-LOC B-MISC B-LOC B-ORG- 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='93 0 0 0 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 0 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0061 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='049 ADJ -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='022 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='54 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='049 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='044 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0830.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0220.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='046 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='044 ADP-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='034 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='65 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='029 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='057 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='029 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='021 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='07 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='034 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='8 ADV -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='021 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='73 9E00 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='01 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='041 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0210.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='057 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='052 CONJ - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='029 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='017 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='91 0 0 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='023 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0058 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='012 DET - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='049 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0089 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='033 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='024 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='016 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='06 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0022 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='042 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='6 FLAG - 0 0 0 0 0 0 1 0 0 0 0 0 0 NOUN -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='037 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='021 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='64 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='033 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='042 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='042 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='4 NUM -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='045 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0045 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0045 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='82 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0045 0 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='054 PRON -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0083 0 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='025 0 0 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='97 0 0 PRT - 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='054 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='041 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='014 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='66 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='14 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='2 VERB -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='022 0 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='033 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='00680.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='0011 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='032 0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='83 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='045 X-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bNAzT4oBgHgl3EQfnP3n/content/2301.01579v1.pdf'} +page_content='27 0 0.' 
A. Estimating the conditional distribution over the hidden variables $z_i$ and $c_l$

a. The derivation of $p(c_l = c \mid c_{-l}, z, Y, \eta, \beta)$ is as follows:
\begin{equation}
p(c_l = c \mid c_{-l}, z, Y, \eta, \beta) \;\propto\; p(c_l = c \mid c_{-l}) \times p(Y^{l} \mid Y^{-l}, z, c, \eta, \beta), \tag{1}
\end{equation}
where $p(c_l = c \mid c_{-l})$ is computed as
\begin{align}
p(c_l = c \mid c_{-l})
&= \frac{p(c_1, c_2, \ldots, c_l = c)}{p(c_1, c_2, \ldots, c_{l-1})}
 = \frac{\dfrac{\Gamma(\epsilon\nu)}{\Gamma(L + \epsilon\nu)} \prod_{c=1}^{C} \dfrac{\Gamma(n_c + \epsilon\nu/C)}{\Gamma(\epsilon\nu/C)}}
        {\dfrac{\Gamma(\epsilon\nu)}{\Gamma(L + \epsilon\nu - 1)} \prod_{c=1}^{C} \dfrac{\Gamma(n_c^{-l} + \epsilon\nu/C)}{\Gamma(\epsilon\nu/C)}} \nonumber\\
&= \frac{\Gamma(L + \epsilon\nu - 1)}{\Gamma(L + \epsilon\nu)} \cdot \frac{\Gamma(n_c + \epsilon\nu/C)}{\Gamma(n_c^{-l} + \epsilon\nu/C)}
 = \frac{n_c^{-l} + \epsilon\nu/C}{L + \epsilon\nu - 1}. \tag{2}
\end{align}
The likelihood $p(Y \mid z, c, \eta, \beta)$ is obtained by integrating out the variables $\Psi^{cl}$:
\begin{equation}
p(Y \mid z, c, \eta, \beta)
= \prod_{c} \prod_{l \in c} \prod_{t} \int p(Y^{cl} \mid \Psi^{cl}_{t})\, p(\Psi^{cl}_{t} \mid \eta^{c}\beta^{c}_{t})\, d(\eta^{c}\beta^{c}_{t})
= \prod_{c} \prod_{l \in c} \prod_{t} \left[ \frac{\Gamma(\eta^{c}_{t})}{\Gamma(n_{lt} + \eta^{c}_{t})} \prod_{s} \frac{\Gamma(n_{lts} + \eta^{c}_{t}\beta^{c}_{ts})}{\Gamma(\eta^{c}_{t}\beta^{c}_{ts})} \right]. \tag{3}
\end{equation}
Then, via Bayes' rule, we obtain the conditional distribution
\begin{equation}
p(Y^{l} \mid Y^{-l}, z, c, \eta, \beta)
= \prod_{t} \left[ \frac{\Gamma(\eta^{c}_{t})}{\Gamma(n_{lt} + \eta^{c}_{t})} \prod_{s} \frac{\Gamma(n_{lts} + \eta^{c}_{t}\beta^{c}_{ts})}{\Gamma(\eta^{c}_{t}\beta^{c}_{ts})} \right]. \tag{4}
\end{equation}
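To make the collapsed prior of Eq. (2) concrete, the short sketch below evaluates it from the group-assignment counts of the other annotators. The function and variable names (group_prior, counts_minus_l, eps_nu) are illustrative placeholders, not anything defined in the paper.

```python
import numpy as np

def group_prior(counts_minus_l, eps_nu):
    """Evaluate p(c_l = c | c_{-l}) = (n_c^{-l} + eps_nu / C) / (L + eps_nu - 1)
    for every group c, where counts_minus_l holds n_c^{-l} (annotator l excluded)
    and L is the total number of annotators including l."""
    C = len(counts_minus_l)
    L = counts_minus_l.sum() + 1
    return (counts_minus_l + eps_nu / C) / (L + eps_nu - 1)

# toy check: with 3 groups and 7 other annotators the probabilities sum to one
print(group_prior(np.array([4.0, 2.0, 1.0]), eps_nu=1.0))
```

Multiplying this vector elementwise by the likelihood term of Eq. (4) and renormalising gives the categorical distribution from which $c_l$ would be resampled in a Gibbs sweep, per Eq. (1).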
b. The derivation of $p(z_i = t \mid z_{-i}, Y, c, \eta, \beta)$ is as follows:
\begin{equation}
p(z_i = t \mid z_{-i}, Y, c, \eta, \beta) \;\propto\; p(z_i = t \mid z_{-i}) \times p(y_i \mid Y^{-i}, z_i = t, z_{-i}, c, \eta, \beta), \tag{5}
\end{equation}
where $p(z_i = t \mid z_{-i})$ is computed as
\begin{align}
p(z_i = t \mid z_{-i})
&= \frac{p(z_1, z_2, \ldots, z_i = t)}{p(z_1, z_2, \ldots, z_{i-1})}
 = \frac{\dfrac{\Gamma(\epsilon\gamma)}{\Gamma(N + \epsilon\gamma)} \prod_{t=1}^{T} \dfrac{\Gamma(n_t + \epsilon\gamma/T)}{\Gamma(\epsilon\gamma/T)}}
        {\dfrac{\Gamma(\epsilon\gamma)}{\Gamma(N + \epsilon\gamma - 1)} \prod_{t=1}^{T} \dfrac{\Gamma(n_t^{-i} + \epsilon\gamma/T)}{\Gamma(\epsilon\gamma/T)}} \nonumber\\
&= \frac{\Gamma(N + \epsilon\gamma - 1)}{\Gamma(N + \epsilon\gamma)} \cdot \frac{\Gamma(n_t + \epsilon\gamma/T)}{\Gamma(n_t^{-i} + \epsilon\gamma/T)}
 = \frac{n_t^{-i} + \epsilon\gamma/T}{N + \epsilon\gamma - 1}, \tag{6}
\end{align}
and $p(y_i \mid Y^{-i}, z_i = t, z_{-i}, c, \eta, \beta)$ is obtained as
\begin{equation}
p(y_i \mid Y^{-i}, z_i = t, z_{-i}, c, \eta, \beta)
= \prod_{c} \prod_{l \in c}
  \frac{\dfrac{\Gamma(\eta^{c}_{t})}{\Gamma(n_{lt} + \eta^{c}_{t})} \prod_{s} \dfrac{\Gamma(n_{lts} + \eta^{c}_{t}\beta^{c}_{ts})}{\Gamma(\eta^{c}_{t}\beta^{c}_{ts})}}
       {\dfrac{\Gamma(\eta^{c}_{t})}{\Gamma(n^{-i}_{lt} + \eta^{c}_{t})} \prod_{s} \dfrac{\Gamma(n^{-i}_{lts} + \eta^{c}_{t}\beta^{c}_{ts})}{\Gamma(\eta^{c}_{t}\beta^{c}_{ts})}}
= \prod_{c} \prod_{l \in c} \frac{\prod_{s} \left(n^{-i}_{lts} + \eta^{c}_{t}\beta^{c}_{ts}\right)^{\mathbb{I}(y_{il} = s)}}{n^{-i}_{lt} + \eta^{c}_{t}}. \tag{7}
\end{equation}
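As a rough illustration of how Eqs. (5)-(7) combine into a single collapsed Gibbs update for $z_i$, the sketch below multiplies the prior of Eq. (6) by a precomputed per-topic likelihood from Eq. (7) and samples from the normalised result. All names are illustrative, and the likelihood is assumed to be supplied in log space.

```python
import numpy as np

def sample_z_i(topic_counts_minus_i, log_lik_per_topic, eps_gamma, rng):
    """One collapsed Gibbs draw of z_i.

    topic_counts_minus_i : n_t^{-i}, topic counts with item i removed (length T)
    log_lik_per_topic    : log p(y_i | Y^{-i}, z_i = t, ...) from Eq. (7), length T
    """
    T = len(topic_counts_minus_i)
    N = topic_counts_minus_i.sum() + 1           # total number of items, including i
    log_prior = np.log(topic_counts_minus_i + eps_gamma / T) - np.log(N + eps_gamma - 1)
    log_post = log_prior + log_lik_per_topic
    post = np.exp(log_post - log_post.max())     # stabilise before normalising
    return rng.choice(T, p=post / post.sum())

rng = np.random.default_rng(0)
print(sample_z_i(np.array([5.0, 3.0, 2.0]), np.log([0.2, 0.5, 0.3]), eps_gamma=1.0, rng=rng))
```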
B.

a. The derivation of $p(\beta^{c}_{tj} \mid \beta^{c}_{t(\sim j)}, \eta^{c}_{t}, Y^{c})$ is as follows:
\begin{equation}
p(\beta^{c}_{tj} \mid \beta^{c}_{t(\sim j)}, \eta^{c}_{t}, Y^{c}) \;\propto\; p(Y^{cj} \mid Y^{-cj}, \beta^{c}_{t}, \eta^{c}_{t}) \times p(\beta^{c}_{tj} \mid \beta^{c}_{t(\sim j)}). \tag{8}
\end{equation}
First, $\beta^{c}_{t}$ follows a Dirichlet distribution:
\begin{equation}
\beta^{c}_{t} \mid \alpha_{t} \sim \mathrm{Dirichlet}(\alpha_{t}), \qquad 0 < \beta^{c}_{tj} < 1, \qquad \sum_{j=1}^{T} \beta^{c}_{tj} = 1, \tag{9}
\end{equation}
where the detailed form is given as
\begin{equation}
\left( \beta^{c}_{t1}, \ldots, \beta^{c}_{tj}, \ldots, \beta^{c}_{tT} \right) \sim \mathrm{Dirichlet}\left( \lambda_{t}\alpha_{t1}, \ldots, \lambda_{t}\alpha_{tj}, \ldots, \lambda_{t}\alpha_{tT} \right). \tag{10}
\end{equation}
With the aggregation property we obtain that
\begin{equation}
\left( \beta^{c}_{tj},\; u^{c}_{tj},\; \beta^{c}_{tt} = 1 - \beta^{c}_{tj} - u^{c}_{tj} \right) \sim \mathrm{Dirichlet}\left( \lambda_{t}\alpha_{tj},\; a_{j},\; \lambda_{t}\alpha_{tt} \right), \tag{11}
\end{equation}
where
\begin{equation}
u^{c}_{tj} = 1 - \sum_{s=1,\, s \neq t,\, s \neq j}^{T} \beta^{c}_{ts}, \tag{12}
\qquad
a_{j} = \sum_{s=1,\, s \neq t,\, s \neq j}^{T} \lambda_{t}\alpha_{ts}. \tag{13}
\end{equation}
The joint probability $p(\beta^{c}_{tj}, u^{c}_{tj})$ is given as
\begin{equation}
p(\beta^{c}_{tj}, u^{c}_{tj})
= \prod_{l \in c} \frac{\Gamma\!\left(\sum_{j=1}^{T} \lambda_{t}\alpha_{tj}\right)}{\Gamma(\lambda_{t}\alpha_{tj})\,\Gamma(a_{j})\,\Gamma(\lambda_{t}\alpha_{tt})}
\times (\beta^{c}_{tj})^{\lambda_{t}\alpha_{tj}-1} (u^{c}_{tj})^{a_{j}-1} (1 - \beta^{c}_{tj} - u^{c}_{tj})^{\lambda_{t}\alpha_{tt}-1}. \tag{14}
\end{equation}
The marginal probability $p(u^{c}_{tj} \mid \eta^{c}_{t}, Y^{c})$ is computed as
\begin{equation}
p(u^{c}_{tj})
= \frac{\Gamma\!\left(\sum_{j=1}^{T} \lambda_{t}\alpha_{tj}\right)}{\Gamma(a_{j})\,\Gamma(\lambda_{t}\alpha_{tt} + \lambda_{t}\alpha_{tj})}
\times (u^{c}_{tj})^{a_{j}-1} (1 - u^{c}_{tj})^{\lambda_{t}\alpha_{tt} + \lambda_{t}\alpha_{tj} - 1}. \tag{15}
\end{equation}
Then the conditional distribution $p(\beta^{c}_{tj} \mid \beta^{c}_{t(\sim j)})$ is given as
\begin{equation}
p(\beta^{c}_{tj} \mid \beta^{c}_{t(\sim j)})
\;\propto\; \left( \frac{\beta^{c}_{tj}}{1 - u^{c}_{tj}} \right)^{\lambda_{t}\alpha_{tj}-1}
            \left( 1 - \frac{\beta^{c}_{tj}}{1 - u^{c}_{tj}} \right)^{\lambda_{t}\alpha_{tt}-1}. \tag{16}
\end{equation}
According to Equation (3) in Appendix A, it can be easily obtained that
\begin{equation}
p(Y^{cj} \mid Y^{-cj}, \beta^{c}_{t}, \eta^{c}_{t})
= \prod_{l \in c} \left[ \frac{\Gamma(n_{ltj} + \eta^{c}_{t}\beta^{c}_{tj})}{\Gamma(\eta^{c}_{t}\beta^{c}_{tj})} \cdot \frac{\Gamma(n_{ltt} + \eta^{c}_{t}\beta^{c}_{tt})}{\Gamma(\eta^{c}_{t}\beta^{c}_{tt})} \right]. \tag{17}
\end{equation}
Finally we obtain that
\begin{align}
p(\beta^{c}_{tj} \mid \beta^{c}_{t(\sim j)}, \eta^{c}_{t}, Y^{c})
&\;\propto\; p(Y^{cj} \mid Y^{-cj}, \beta^{c}_{t}, \eta^{c}_{t}) \times p(\beta^{c}_{tj} \mid \beta^{c}_{t(\sim j)}) \nonumber\\
&\;\propto\; \prod_{l \in c} \left[ \frac{\Gamma(n_{ltj} + \eta^{c}_{t}\beta^{c}_{tj})}{\Gamma(\eta^{c}_{t}\beta^{c}_{tj})} \cdot \frac{\Gamma(n_{ltt} + \eta^{c}_{t}\beta^{c}_{tt})}{\Gamma(\eta^{c}_{t}\beta^{c}_{tt})} \right]
\times \left( \frac{\beta^{c}_{tj}}{1 - u^{c}_{tj}} \right)^{\lambda_{t}\alpha_{tj}-1}
       \left( 1 - \frac{\beta^{c}_{tj}}{1 - u^{c}_{tj}} \right)^{\lambda_{t}\alpha_{tt}-1}. \tag{18}
\end{align}
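Since the Metropolis-Hastings step in Appendix C only needs Eq. (18) up to a normalising constant, it is convenient to evaluate it in log space. The sketch below does this under the assumption that the per-annotator counts $n_{ltj}$ and $n_{ltt}$ for the group are already collected into arrays; every name here is an illustrative placeholder rather than the paper's implementation.

```python
import numpy as np
from scipy.special import gammaln  # log Gamma, used for the Gamma-function ratios

def log_beta_tj_conditional(beta_tj, u_tj, n_ltj, n_ltt, eta_t, lam_t, alpha_tj, alpha_tt):
    """Log of the unnormalised conditional p(beta^c_tj | ...) in Eq. (18).
    n_ltj, n_ltt are arrays with one entry per annotator l in group c, and
    beta^c_tt = 1 - beta^c_tj - u^c_tj as in Eq. (11)."""
    beta_tt = 1.0 - beta_tj - u_tj
    log_lik = np.sum(
        gammaln(n_ltj + eta_t * beta_tj) - gammaln(eta_t * beta_tj)
        + gammaln(n_ltt + eta_t * beta_tt) - gammaln(eta_t * beta_tt)
    )
    log_prior = ((lam_t * alpha_tj - 1.0) * np.log(beta_tj / (1.0 - u_tj))
                 + (lam_t * alpha_tt - 1.0) * np.log(1.0 - beta_tj / (1.0 - u_tj)))
    return log_lik + log_prior

# toy evaluation with two annotators in the group
print(log_beta_tj_conditional(0.3, 0.5, np.array([2.0, 1.0]), np.array([5.0, 4.0]),
                              eta_t=2.0, lam_t=1.0, alpha_tj=1.0, alpha_tt=1.0))
```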
b. The derivation of $p(\eta^{c}_{t} \mid \beta^{c}_{t}, Y^{c})$ is as follows. The joint posterior distribution $p(\eta^{c}_{t}, \beta^{c}_{t} \mid Y^{c})$ is given as
\begin{equation}
p(\eta^{c}_{t}, \beta^{c}_{t} \mid Y^{c})
\;\propto\; \prod_{l \in c} \frac{\Gamma(\eta^{c}_{t})}{\Gamma(n_{lt} + \eta^{c}_{t})} \prod_{j} \frac{\Gamma(n_{ltj} + \eta^{c}_{t}\beta^{c}_{tj})}{\Gamma(\eta^{c}_{t}\beta^{c}_{tj})}
\times \prod_{j} (\beta^{c}_{tj})^{\lambda_{t}\alpha_{tj}-1} \times \lambda e^{-\lambda\eta^{c}_{t}}. \tag{19}
\end{equation}
Then the conditional posterior distribution $p(\eta^{c}_{t} \mid \beta^{c}_{t}, Y^{c})$ is obtained as
\begin{equation}
p(\eta^{c}_{t} \mid \beta^{c}_{t}, Y^{c})
\;\propto\; \prod_{l \in c} \frac{\Gamma(\eta^{c}_{t})}{\Gamma(n_{lt} + \eta^{c}_{t})} \prod_{j} \frac{\Gamma(n_{ltj} + \eta^{c}_{t}\beta^{c}_{tj})}{\Gamma(\eta^{c}_{t}\beta^{c}_{tj})}
\times \lambda e^{-\lambda\eta^{c}_{t}}. \tag{20}
\end{equation}

C. Metropolis-Hastings algorithm simulating $\beta^{c}_{tj}$ and $\eta^{c}_{t}$

Algorithm 1: Metropolis-Hastings algorithm simulating $\beta^{c}_{tj}$.
 1: Initialize $\beta^{c,0}_{tj}$
 2: for $i = 1, 2, \ldots$ do
 3:   Propose: $\beta^{c,\mathrm{cand}}_{tj} \sim U\!\left(0, \min\{2\beta^{c,i-1}_{tj}, 1\}\right)$
 4:   Acceptance probability: $\alpha(\beta^{c,\mathrm{cand}}_{tj} \mid \beta^{c,i-1}_{tj}) = \min\left\{1, \dfrac{p(\beta^{c,\mathrm{cand}}_{tj} \mid \beta^{c}_{t(\sim j)}, \eta^{c}_{t}, Y^{c})}{p(\beta^{c,i-1}_{tj} \mid \beta^{c}_{t(\sim j)}, \eta^{c}_{t}, Y^{c})}\right\}$
 5:   $u \sim U(0, 1)$
 6:   if $u < \alpha$ then
 7:     Accept the proposal: $\beta^{c,i}_{tj} \leftarrow \beta^{c,\mathrm{cand}}_{tj}$
 8:   else
 9:     Reject the proposal: $\beta^{c,i}_{tj} \leftarrow \beta^{c,i-1}_{tj}$
10:   end if
11: end for

Algorithm 2: Metropolis-Hastings algorithm simulating $\eta^{c}_{t}$.
 1: Initialize $\eta^{c,0}_{t}$
 2: for $i = 1, 2, \ldots$ do
 3:   Propose: $\eta^{c,\mathrm{cand}}_{t} \sim U\!\left(0, \min\{2\eta^{c,i-1}_{t}, 1\}\right)$
 4:   Acceptance probability: $\alpha(\eta^{c,\mathrm{cand}}_{t} \mid \eta^{c,i-1}_{t}) = \min\left\{1, \dfrac{p(\eta^{c,\mathrm{cand}}_{t} \mid \beta^{c}_{t}, Y^{c})}{p(\eta^{c,i-1}_{t} \mid \beta^{c}_{t}, Y^{c})}\right\}$
 5:   $u \sim U(0, 1)$
 6:   if $u < \alpha$ then
 7:     Accept the proposal: $\eta^{c,i}_{t} \leftarrow \eta^{c,\mathrm{cand}}_{t}$
 8:   else
 9:     Reject the proposal: $\eta^{c,i}_{t} \leftarrow \eta^{c,i-1}_{t}$
10:   end if
11: end for
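Putting the pieces together, a plain-Python rendering of Algorithm 1 might look as follows. The $U(0, \min\{2\beta^{i-1}, 1\})$ proposal and the $\min\{1, \text{density ratio}\}$ acceptance follow the pseudocode, and the target is passed in as a log-density (for example the log_beta_tj_conditional sketch above). The initial value, iteration count, and the stand-in target in the usage line are illustrative assumptions only.

```python
import numpy as np

def metropolis_hastings(log_density, x_init, n_iters, rng):
    """Algorithm 1-style sampler: propose from U(0, min(2*x, 1)) and accept with
    probability min(1, density ratio); Algorithm 2 has the same structure for eta^c_t.
    Note: as in the pseudocode, no correction for the asymmetric proposal is applied."""
    x = x_init
    samples = np.empty(n_iters)
    for i in range(n_iters):
        cand = rng.uniform(0.0, min(2.0 * x, 1.0))                  # step 3: proposal
        log_alpha = min(0.0, log_density(cand) - log_density(x))    # step 4: acceptance prob.
        if np.log(rng.uniform()) < log_alpha:                       # steps 5-7: accept
            x = cand
        samples[i] = x                                              # steps 8-9: otherwise keep x
    return samples

# toy usage: stand-in target proportional to a Beta(2, 5) density on (0, 1)
rng = np.random.default_rng(1)
draws = metropolis_hastings(lambda b: np.log(b) + 4.0 * np.log(1.0 - b),
                            x_init=0.5, n_iters=5000, rng=rng)
print(draws[1000:].mean())  # post-burn-in sample mean of the chain
```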
diff --git a/bdFST4oBgHgl3EQfCjh5/content/2301.13707v1.pdf b/bdFST4oBgHgl3EQfCjh5/content/2301.13707v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..b527abd50d054822d6ac6dbd357f923437dc0deb
--- /dev/null
+++ b/bdFST4oBgHgl3EQfCjh5/content/2301.13707v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:beac12167ae43a015c731e734d1f80d3de2190969485d7525873ffe4052cbb5d
+size 6141215
diff --git a/bdFST4oBgHgl3EQfCjh5/vector_store/index.pkl b/bdFST4oBgHgl3EQfCjh5/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..3184e2834f06d9187171d3cc9cfd06724552451e
--- /dev/null
+++ b/bdFST4oBgHgl3EQfCjh5/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3374ffb14bbb1f410f25ddbbdc177097397de7f0e302687cfce9e68949c88f73
+size 231723
diff --git a/c9FIT4oBgHgl3EQfnivD/content/2301.11315v1.pdf b/c9FIT4oBgHgl3EQfnivD/content/2301.11315v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..8cd4b41c8981645c4201258109c0fb99b8f6ca91
--- /dev/null
+++ b/c9FIT4oBgHgl3EQfnivD/content/2301.11315v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d4629284ac05950eec6746c8d829209dc7611c8771201961d7b0dec07b57b52
+size 1179777
diff --git a/c9FIT4oBgHgl3EQfnivD/vector_store/index.faiss b/c9FIT4oBgHgl3EQfnivD/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..112d3b104d472bad71c99eb1727b102e0d2fd1a0
--- /dev/null
+++ b/c9FIT4oBgHgl3EQfnivD/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5810894044164ff9b6a402785c0ca1bbe95697cc707266fb88b5f43a369bd029
+size 1835053
diff --git a/c9FIT4oBgHgl3EQfnivD/vector_store/index.pkl b/c9FIT4oBgHgl3EQfnivD/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..dc29c2f6599296dc471151eae9b2494ba429e102
--- /dev/null
+++ b/c9FIT4oBgHgl3EQfnivD/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ebe8658d61b91d890b32cdf63644e2a2bd565daf002e2198422c70528e330d
+size 67796
diff --git a/cNE0T4oBgHgl3EQf4wL5/content/2301.02744v1.pdf b/cNE0T4oBgHgl3EQf4wL5/content/2301.02744v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..23bd03c16ee732c512e9cc4a85faf2baf3cfd319
--- /dev/null
+++ b/cNE0T4oBgHgl3EQf4wL5/content/2301.02744v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d625f2ae54541f34fab5fdcedd6cd22784f972179830e637face0a39831fa2c2
+size 166987
diff --git a/cNE0T4oBgHgl3EQf4wL5/vector_store/index.faiss b/cNE0T4oBgHgl3EQf4wL5/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..aa9d47eb1a37277e31d403a3b60819a71a735a35
--- /dev/null
+++ b/cNE0T4oBgHgl3EQf4wL5/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da36d27ff7a588069854b47e32ae8965271d72517d8e7817c1132aac5231bf2f
+size 1769517
diff --git a/cNE0T4oBgHgl3EQf4wL5/vector_store/index.pkl b/cNE0T4oBgHgl3EQf4wL5/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..4b465d17b43e7a248dab37eed8ff45030ae0e672
--- /dev/null
+++ b/cNE0T4oBgHgl3EQf4wL5/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb3efd6e8704574e54ad15ff840c171fa5fcc4f21fd28ec77527015662b82c27
+size 72231 diff --git a/hNE3T4oBgHgl3EQf4AvA/vector_store/index.faiss b/hNE3T4oBgHgl3EQf4AvA/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..f4722cca9d580aef1c05e230112e49b376068698 --- /dev/null +++ b/hNE3T4oBgHgl3EQf4AvA/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13a795b11bfe213a260d7fcdecd9f574c56610e3c48666460578d0ed57b704ee +size 5832749 diff --git a/jNFAT4oBgHgl3EQfaR1d/content/tmp_files/2301.08550v1.pdf.txt b/jNFAT4oBgHgl3EQfaR1d/content/tmp_files/2301.08550v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..5755acc41346f18e2b6f4bf6bc633dc2ea080ffb --- /dev/null +++ b/jNFAT4oBgHgl3EQfaR1d/content/tmp_files/2301.08550v1.pdf.txt @@ -0,0 +1,1796 @@ +arXiv:2301.08550v1 [cond-mat.supr-con] 20 Jan 2023 +Superconducting fluctuations and charge-4e plaquette state at strong coupling +Qiong Qin,1, 2 Jian-Jun Dong,3 Yutao Sheng,1, 2 Dongchen Huang,1, 2 and Yi-feng Yang1, 2, 4, ∗ +1Beijing National Laboratory for Condensed Matter Physics and Institute of Physics, +Chinese Academy of Sciences, Beijing 100190, China +2University of Chinese Academy of Sciences, Beijing 100049, China +3Department of Physics and Chongqing Key Laboratory for Strongly Coupled Physics, +Chongqing University, Chongqing 401331, China +4Songshan Lake Materials Laboratory, Dongguan, Guangdong 523808, China +(Dated: January 23, 2023) +Recent experiments indicate that superconducting fluctuations also play an important role in +overdoped cuprates. Here we apply the static auxiliary field Monte Carlo approach to study phase +correlations of the pairing fields in a microscopic model with spin-singlet pairing interaction. We +find that the short- and long-range phase correlations are well captured by the phase mutual in- +formation, which allows us to construct a theoretical phase diagram containing the uniform d-wave +superconducting region, the phase fluctuating region, the local pairing region, and the disordered +region. We show that the gradual development of phase coherence has a number of consequences +on spectroscopic measurements, such as the development of the Fermi arc and the anisotropy in the +angle-resolved spectra, scattering rate, entropy, specific heat, and quasiparticle dispersion, in good +agreement with experimental observations. For strong coupling, our Monte Carlo simulation re- +veals an unexpected charge-4e plaquette state with d-wave bonds, which competes with the uniform +d-wave superconductivity and exhibits a U-shaped density of states. +I. INTRODUCTION +Superconducting fluctuations have been proposed to +play an important role in underdoped cuprates [1–14]. +Their presence may be responsible for the back bending +bands above the superconducting transition temperature +Tc [15], continuous variation of the spectral gap across +the transition [16], and probably the large Nernst effect +and diamagnetic signals [17]. They have also been used +to explain the mysterious pseduogap [18–22], but negated +by some experiments showing that superconducting fluc- +tuations only exist in a much narrower region than the +pseudogap [23]. Their interplay with competing orders +may be the cause of particle-hole asymmetry [24], time +reversal symmetry breaking [24, 25], or rotational sym- +metry breaking [26] observed in some materials. 
+In overdoped cuprates, however, superconducting fluc- +tuations have scarcely been considered seriously [27], al- +though a linear proportionality between the superfluid +density and Tc has been reported and indicates a key +role of phase stiffness in controlling the superconductiv- +ity [28, 29]. +Very recently, the angle-resolved photoe- +mission spectroscopy (ARPES) observation of a d-wave +gap and particle-hole symmetric dispersion above Tc in +overdoped Bi2Sr2CaCu2O8+δ [30–32] has stimulated in- +tensive debates concerning the existence of phase fluctu- +ations in overdoped cuprates and whether the observed +anomalous properties are due to superconducting fluctu- +ations or involve other mechanisms such as anisotropic +impurity scattering [33]. +In this work, we explore potential consequences of su- +perconducting fluctuations on the spectroscopic obser- +vations in overdoped cuprates. Different from previous +studies [14, 33–35], we employ a static auxiliary field +Monte Carlo approach [36–41] and use phase mutual in- +formation to analyze short- and long-range phase corre- +lations of the superconducting pairing fields. The mu- +tual information [42–48] measures the nonlinear associa- +tion of the probabilistic distribution [49–51] and has been +sucessfully applied to various physical systems [52–57]. It +provides an excellent indicator of superconducting phase +correlation and allows us to construct a superconducting +phase diagram with the temperature and pairing inter- +action. We identify three temperature scales over a wide +intermediate range of the pairing interaction and deter- +mine four distinct phases: the superconducting, (macro- +scopic) phase fluctuating, local pairing, and disordered +regions. Calculations of the angle-resolved spectra, scat- +tering rate, entropy, specific heat, quasiparticle disper- +sion, and Fermi arc show interesting anisotropic features, +beyond the mean-field theory but agreeing well with ex- +periments. For sufficiently strong pairing interaction, we +find a plaquette state of charge-4e pairing with a U- +shaped density of states that competes with the uniform +d-wave superconductivity [58]. Our work provides a sys- +tematic understanding of the effects of superconducting +fluctuations on the spectroscopic properties in overdoped +cuprates. +II. MODEL AND METHOD +We start with the following Hamiltonian, +H = − +� +ilσ +tilc† +iσclσ − µ +� +iσ +c† +iσciσ − V +� +⟨ij⟩ +� +ψS +ij +�† ψS +ij, (1) + +2 +where the pairing interaction is written in an explicit +form for the spin-singlet superconductivity with ψS +ij = +1 +√ +2(ci↓cj↑ − ci↑cj↓) and the strength V > 0, which may +be directly derived from an antiferromagnetic spin den- +sity interaction or an attractive charge density interac- +tion between nearest-neighbor sites [59]. To decouple the +pairing interaction, we apply the Hubbard-Stratonovich +transformation and introduce the auxiliary field ∆ij [60]: +V ¯ψS +ijψS +ij → ¯∆ijψS +ij + ¯ψS +ij∆ij + |∆ij|2 +V +. +(2) +The model is generally unsolvable. To proceed, we fur- +ther adopt a static approximation and ignore the imagi- +nary time dependence of the auxiliary fields. This allows +us to integrate out the fermionic degrees of freedom and +simulate solely the pairing fields ∆ij. 
We obtain an effective action:

S_{\mathrm{eff}}(\Delta) = -\sum_{i} \ln\!\left(1 + e^{-\beta\Lambda_{i}}\right) + 2\beta V \sum_{\langle ij \rangle} \bar{\Delta}_{ij}\Delta_{ij},   (3)

where β is the inverse temperature and Λi are the eigenvalues of the matrix

O = \begin{pmatrix} -\mu - T & M \\ M^{*} & \mu + T \end{pmatrix},   (4)

in which T is the N × N hopping matrix (N is the site number) and M_{ij} = V\Delta_{ij} comes from the pairing term. For spin-singlet pairing, ∆ij is symmetric and defined on the bond between two nearest-neighbor sites ij. We thus have in total 2N independent complex variables satisfying the probabilistic distribution:

p(\Delta) = Z^{-1} e^{-S_{\mathrm{eff}}}, \qquad Z = \int \mathcal{D}\Delta\, \mathcal{D}\bar{\Delta}\, e^{-S_{\mathrm{eff}}},   (5)

where Z is the partition function serving as the normalization factor. Because O is an Hermitian matrix, all its eigenvalues Λi and consequently Seff are real. Hence, the above model can be simulated using Monte Carlo with the Metropolis algorithm. In the following, all presented results are obtained on a 10×10 square lattice (N = 100). Results on larger lattices [61, 62] have been examined and are qualitatively consistent. For simplicity, only the nearest-neighbor (t) and next-nearest-neighbor (t′) hopping parameters are included. We take t′ = −0.45t following the common choice in the literature [63, 64] and set t as the energy unit. For real materials, t is typically of the order of 0.1 eV. The chemical potential µ is specially chosen to have the large Fermi surface of overdoped cuprates [65, 66], as plotted in the inset of Fig. 1(a).

III. RESULTS

FIG. 1: (a) The mean-field phase diagram, where Tc and ∆(0) are the superconducting transition temperature and the maximum of the momentum-dependent gap at T = 0, respectively. The inset gives the superconducting gap ∆k along the Fermi surface for V = 1.5 at T = 0.0001. (b) Evolution of the amplitude distribution p(|∆|) for all bonds at T = 0.0008, showing one peak for moderate interaction and two peaks for strong interaction. (c) The peak position |∆|_max of p(|∆|) as a function of the pairing interaction V, where two maxima are seen to occur for V ≥ 4. (d) Evolution of p(|∆|) from two-peak to one-peak structure with increasing temperature for V = 6.1.

For comparison, we first discuss the uniform mean-field solution. The pairing fields are found to satisfy ∆^x = −∆^y, where the superscript represents the bond direction. The gap along the Fermi surface is shown in the inset of Fig. 1(a), reflecting a typical d_{x^2−y^2}-wave structure [67]. The maximum gap size ∆(T = 0) and Tc are plotted in Fig. 1(a) and both increase with increasing pairing interaction V. The typical BCS formula for Tc is reproduced only at small V but violated for V > 0.5, where we find a roughly linear relation Tc ∼ V with the ratio 2∆(0)/Tc ≈ 4.6−6.1, greater than the BCS prediction. Thus the mean-field solution already indicates a strong-coupling nature of the superconductivity for large V.

A. Spatial correlations of the pairing fields

Our Monte Carlo simulations of the auxiliary pairing fields allow us to study the effect of superconducting fluctuations beyond the mean-field solution. Figure 1(b) shows the amplitude distribution of the pairing field p(|∆|) on all bonds at a very low temperature T = 0.0008.
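The distributions above are accumulated from the Metropolis sampling of p(∆) described in Sec. II. As a concrete illustration, the following minimal sketch, written for this text and not the authors' code, samples the static pairing fields from p(∆) ∝ e^{-S_eff} on a small lattice and collects the amplitude distribution p(|∆|). The chemical potential value, the proposal width, the number of sweeps, and the full rediagonalization of O after every single-bond update are simplifying assumptions; production runs on larger lattices would need cheaper update schemes, for example the traveling cluster approximations of Refs. [61, 62].

import numpy as np

L = 10; N = L * L                      # 10 x 10 square lattice
t, tp = 1.0, -0.45                     # nearest- and next-nearest-neighbor hopping
mu = -1.2                              # illustrative value; the paper tunes mu to the overdoped Fermi surface
V, temp = 1.5, 0.05                    # pairing interaction and temperature
beta = 1.0 / temp
rng = np.random.default_rng(0)

def site(x, y):                        # periodic boundary conditions
    return (x % L) + L * (y % L)

bonds = []                             # the 2N nearest-neighbor bonds carrying Delta_ij
Tmat = np.zeros((N, N))                # hopping matrix T
for x in range(L):
    for y in range(L):
        i = site(x, y)
        for dx, dy in [(1, 0), (0, 1)]:
            j = site(x + dx, y + dy)
            bonds.append((i, j))
            Tmat[i, j] = Tmat[j, i] = t
        for dx, dy in [(1, 1), (1, -1)]:
            j = site(x + dx, y + dy)
            Tmat[i, j] = Tmat[j, i] = tp

def S_eff(delta):
    """Effective action of Eq. (3) from the eigenvalues of the matrix O of Eq. (4)."""
    M = np.zeros((N, N), dtype=complex)
    for b, (i, j) in enumerate(bonds):
        M[i, j] = M[j, i] = V * delta[b]          # Delta_ij is symmetric on the bond
    O = np.block([[-mu * np.eye(N) - Tmat, M],
                  [M.conj(), mu * np.eye(N) + Tmat]])
    lam = np.linalg.eigvalsh(O)                   # O is Hermitian, so the eigenvalues are real
    return -np.sum(np.logaddexp(0.0, -beta * lam)) + 2.0 * beta * V * np.sum(np.abs(delta) ** 2)

delta = 0.1 * (rng.standard_normal(2 * N) + 1j * rng.standard_normal(2 * N))
S = S_eff(delta)
step, amplitudes = 0.2, []
for sweep in range(5):                            # a few sweeps only; each bond update rediagonalizes O
    for b in range(2 * N):
        old = delta[b]
        delta[b] = old + step * (rng.standard_normal() + 1j * rng.standard_normal())
        S_new = S_eff(delta)
        if rng.random() < np.exp(min(0.0, S - S_new)):   # Metropolis acceptance for p ~ exp(-S_eff)
            S = S_new
        else:
            delta[b] = old
    amplitudes.extend(np.abs(delta))              # samples of |Delta| for the distribution p(|Delta|)
p_amp, edges = np.histogram(amplitudes, bins=40, density=True)

Histograms of |∆| accumulated in this way correspond to the distributions p(|∆|) discussed around Fig. 1(b)-(d), although the runs behind those figures are of course much longer and scan many parameter values.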
We focus on moderate and large pairing interactions where Tc is not too small for our numerical simulations. For V = 1.3 and 3.6, the distributions are quite normal and can be well fitted by a Gaussian form. But for V ≥ 4, it develops a two-peak structure. Figure 1(c) summarizes the peak positions as a function of V for T = 0.0008. A transition occurs at V ≈ 4.0, separating the superconductivity into two regions. We will see that they correspond to a homogeneous superconducting state for moderate V and a spatially modulated state for large V, respectively. In Fig. 1(d), the two-peak distribution at large V is gradually suppressed with increasing temperature and becomes a single peak at sufficiently high temperatures, implying a close relation between the two states.

We first focus on the homogeneous state for moderate V and study its properties from the perspective of phase correlations of the pairing fields. Our tool is the joint distribution p(θ^i_0, θ^i_R), where 0 ≡ (0, 0) denotes the bond attached to any origin site, R represents the relative coordinate of the other bond, and i = x, y denotes the bond along the x- or y-direction. Figure 2(a) plots some typical results for i = x and R = (1, 0) (short-range) and (5, 5) (long-range) at different temperatures. Due to rotational symmetries, the results are the same for i = y. At high temperatures, we find a uniform distribution due to strong thermal fluctuations. With lowering temperature, the two phases are gradually locked, as manifested by the maximum distribution along the diagonal. A direct comparison shows that this feature first appears on short range with R = (1, 0) and then on long range with R = (5, 5). Hence, the phase coherence of the superconducting pairing grows gradually on the lattice to longer distance with decreasing temperature.

To quantify the correlation, we introduce their phase mutual information defined as

I^{i}_{R} = \int d\theta^{i}_{0}\, d\theta^{i}_{R}\; p(\theta^{i}_{0}, \theta^{i}_{R}) \ln \frac{p(\theta^{i}_{0}, \theta^{i}_{R})}{p(\theta^{i}_{0})\, p(\theta^{i}_{R})},   (6)

where p(x) is the marginal distribution function of the continuous random variable x and p(x, y) is the joint probability distribution of x and y. Figure 2(b) compares the phase correlations as a function of temperature on short and long distances. We see they all exhibit similar behavior below Tc = 0.054 and vary exponentially (dashed lines) with the temperature. But for R = (5, 5), the mutual information suffers from an abrupt change and diminishes more rapidly above Tc, indicating a disparity between short- and long-range phase correlations. Thus, Tc marks a characteristic temperature scale separating the phase coherence on different spatial scales, above which long-range correlations start to be suppressed.

At higher temperature Tp = 0.08 for the chosen parameters, a weaker slope change is found for both short- and long-range correlations. To see what happens at this temperature, we apply the principal component analysis (PCA) to the Monte Carlo samples as collected in
FIG. 2: (a) Comparison of the joint distribution p(θ^x_0, θ^x_R) for R = (1, 0) and R = (5, 5) at different temperatures.
(b) Evo- +lution of the short- and long-range phase mutual information +calculated from (a) as a function of temperature, showing two +temperature scales Tc and Tp (vertical grey lines) from the +slope change. +(c) Temperature dependence of the variance +of two principal components θ± +R = +1 +√ +2(θ0 ± θR) from PCA +analyses of the data in (a) for short- and long-range phase +correlations. The inset shows the results for R = (1, 0) on a +larger temperature window. (d) Power-law decay of the phase +mutual information Ix +R with distance |R| = |Rx| + |Ry|. The +dotted lines are the fitting curves Ix +R ∝ |R|−α. (e) Tempera- +ture dependence of the extracted exponent α from (d). The +vertical grey lines mark the transition points identified in (b). +Fig. 2(a). As expected, this reveals two principal direc- +tions θ± +R = +1 +√ +2(θ0 ±θR) on the (θ0, θR) plane for all tem- +peratures, with opposite temperature dependence of their +variances. The superscript i is dropped because the data +on both bond directions i = x, y are considered together. +As shown in Fig. 2(c), the decrease of var(θ− +R) signifies the +increase of phase locking degree on the distance R with +lowering temperature. Interestingly, var(θ± +R) become al- + +0 +0.2 +0.4 +0.6 0.1 +0.2 +0.3 +0.4 0.1 +0.2 +0.3 +4 +0. +0.4 +(a) +(1,0) +030 +T=0.034 +T=0.062 +T=0.090 +T=0.560 +-1 +1 +(5,5) +T=0.034 +T=0.062 +T=0.090 +T=0.560 +-1 +-1 +-1 +1 +-1 +-1 +0/T +1元 +0 +0 +0 +0 +(b) +(c) +10 +T +T +T +e var(0) +V=1.5 +C +p +(1,0) +p. +Q + var( +(5,5) +4 +(1,0) +H +R +(0,1) +var( +3 +2 +0.2 +0.4 +R +(5,5) +var(0) +(1.0) +Ty +(5,5) +* var(0) +(5,5) +10 +0.01 +0.03 +0.05 +0.07 +0.09 +0.02 +0.04 +0.06 +0.08 +T +T +(d) +(e) +T +2.5 +T +10-1 +C +p +2 +1.5 +a +ILR +1 +0 T=0.03 +10-3 +2 +← T=0.05 +0.5 +T=0.07 +T=0.09 +0 +1 +2 +3 +4 +0 +0.05 +0.1 +R +T4 +most equal above Tp along both directions for R = (5, 5), +implying a uniform distribution on the (θ0, θR) plane and +hence the almost complete loss of long-range phase cor- +relation. On the other hand, the two variances still differ +for R = (1, 0), indicating the existence of short-range +correlation. The latter is to be suppressed only at much +higher temperatures above Tl = 0.25, as shown in the +inset of Fig. 2(c). Thus, Tl marks a temperature scale +above which no phase correlations are present (a disor- +dered state). Below Tl, the pairing fields start to develop +between neighboring bonds, indicating the onset of local +pairing only with short-range correlations. A long-range +or macroscopic phase correlation only emerges below Tp +(a phase fluctuating state) and eventually grows into a +global coherent state (the superconductivity) at Tc, be- +low which we can no longer distinguish short and long- +range correlations. +The above separation of different regions may be seen +from a different angle by plotting the mutual information +as a function of the “distance” |R| ≡ |Rx| + |Ry|. The +results of Ix +R are shown in Fig. 2(d). We see excellent +power-law decay, Ix +R ∝ |R|−α, at all temperatures. The +extracted decay rate α increases with temperature but +behaves differently in the three regions divided by Tc and +Tp. α varies most rapidly for Tc < T < Tp, which may +be understood from the suppression of long-range phase +correlation in this temperature region. For T < Tc, α +approaches almost zero, indicating the presence of long- +range coherence. The fact α > 2 for T > Tp implies a +rapid decay due to short-range correlation. +B. 
Effects on spectroscopic properties +Having established how the superconductivity is de- +veloped from its phase correlation, we now examine how +these may be related to the experimental observations +in real materials. First of all, the d-wave nature of the +superconducting pairing can be seen from the joint dis- +tribution of θx +0 and θy +0 connected to the same site. As +shown in the inset of Fig. 3(a), we find a rough correla- +tion, θy +0 = θx +0 ± π, namely a sign change of the pairing +fields along two perpendicular bond directions. +Their +mutual information Ixy +0 +is presented in Fig. 3(a) as a +function of temperature. Its slope changes at Tc and Tp +are similar to those of Ii +R between neighboring bonds. +The separation of short- and long-range phase correla- +tions have important consequences on the spectral prop- +erties, which may be studied by assuming a twist bound- +ary condition to overcome the finite size effect [68]. Fig- +ure 3(b) plots the total density of state at the Fermi en- +ergy N(0) normalized by its high temperature value. It is +almost a constant above Tp, but then decreases gradually +with lowering temperature, reflecting the spectral weight +depression induced by gap opening at zero energy. Inter- +estingly, its temperature derivative dN(0)/dT exhibits a +FIG. 3: (a) Temperature dependence of the phase mutual in- +formation Ixy +0 +between the x- and y-bonds attached to the +same site. +The inset shows their joint phase distribution +at T = 0.034, indicating d-wave correlations between two +bonds. (b) The normalized total density of states N(0) at +the Fermi energy and its temperature derivative dN(0)/dT +as functions of temperature, showing features at Tc and Tp +(grey vertical lines) determined from the phase mutual in- +formation. (c) Temperature evolution of the total density of +states N(ω), showing the gradual gap opening near the Fermi +energy. The inset illustrates the azimuthal angle φ and the +positions of node and antinode. (d) Temperature dependence +of the angle-resolved spectral function A(φ, 0) and its deriva- +tive dA(φ, 0)/dT at the noninteraction Fermi wave vector and +the Fermi energy at V = 1.5 for φ = 0.2, 0.3. +maximum at around Tc, consistent with the slope change +of the long-range phase mutual information Ix(y) +(5,5). Cor- +respondingly, as shown in Fig. 3(c), a pseudogap devel- +ops gradually on N(ω) with lowering temperature over +the intermediate range Tc < T < Tp. These establish a +close relation between the long-range phase correlation +and spectral gap of the superconductivity. +Similar temperature evolution is also seen in the angle- +resolved spectral function A(φ, 0) and its temperature +derivative dA(φ, 0)/dT along the azimuthal angle φ at +the noninteracting Fermi wave vector and zero energy. +For larger φ away from the antinode, the spectral func- +tion grows to a maximum at lower temperature and has +a higher residual value at zero temperature limit. Mean- +while, its temperature derivative becomes more enhanced +below Tc but suppressed above Tc. +Such an intrinsic +anisotropy has been observed in the latest ARPES ex- +periment [31]. +To clarify the origin of the anisotropy, we compare +in Fig. 
4(a) the temperature dependence of the spectral + +(a) +(b) +T +T +1 +c +p +0.06 +0.8 +@ +LP/(O)Np +0 +10-2 +0.5 +Z +0.4 +0.4 +.0 +0.3 +0.02 +0.2 +0.2 +eN(0) +T=0.034 +T +T +0.1 +1 +0 +1 +c +-1 +/ +AdN(O)/dT +0 +0 +10° +0 +0.01 +0.03 +0.05 +0.07 +0.09 +0.01 +0.03 +0.05 +0.07 +0.09 +T +T +(c) +(p) +T +dA(0.2,0)/dT +0.6 +1.0 +dA(0.3.0)/dT +K +node +0.8 +0.10 +antinode +0 A(0.2,0) +0.4 +Lp/(0°Φ)vp +Φ= π/4 +(3) +0 +*A(0.3,0) +0.6 +0 +k +T +Z +x +0.2 +< 0.4 +0.2 +OFT +0.030 0.054 0.078 +0 +0 +-1 +0 +1 +0.01 +0.03 +0.05 +0.07 +0.09 +T +35 +FIG. 4: (a) Temperature evolution of the angle-resolved spec- +tral function A(φ, ω) on different positions of the Fermi sur- +face. (b) Comparison of the extracted gap ∆(φ, T ) from (a) +as functions of the temperature T . (c) Angular dependence +of the spectral gap ∆(φ, T ) and scattering rate Γ(φ, T ) on the +Fermi surface. ∆ and Γ are defined as the energy and the +half-maximum half-width of the upper peak of the spectral +function A(φ, ω). (d) and (e) give the calculated thermal en- +tropy S(φ, T ) and specific heat coefficient γ(φ, T ) as functions +of temperature at different positions (φ) on the Fermi surface. +functions A(φ, ω) for different azimuthal angle φ. Ob- +viously, they exhibit very different behaviors near nodal +or antinodal directions. Figure 4(b) plots the extracted +spectral gap ∆(φ, T ) as a function of temperature. With +increasing temperature, the gap closes first near the +nodal direction. Thus as shown in Fig. 4(c), it only sat- +isfies the ideal d-wave form ∆(φ, T ) ∝ cos(2φ) (green +dashed line) at sufficiently low temperatures. This is be- +yond the mean-field approximation but reflects the effect +of phase fluctuations. Consequently, the scattering rate +Γ(φ) estimated from the half-maximum half-width of the +upper peak of A(φ, ω) also exhibits smaller values near +the node. +The anisotropy of the spectral functions has an effect +on the angle-resolved thermal entropy S(φ, T ) and the +specific heat coefficient γ(φ, T ) = dS(φ, T )/dT by us- +ing S(φ, T ) = − +� +dωA(φ, ω)[f ln f + (1 − f) ln(1 − f)], +FIG. 5: +(a) Intensity plot of the spectral function A(ky, ω) +for kx = −3.047 at T = 0.022, 0.070, 0.098. (b) Extracted +dispersions from the spectral functions at different temper- +atures, showing back bending even above Tc. +The vertical +grey lines mark the Fermi wave vector ky = ±0.4712. +(c) +The dispersions near antinode and node for T = 0.066 and +0.034. For comparison, all curves are shifted such that the +Fermi wave vectors are located at ky = 0.5 (grey line). For +clarity, only the lower (negative energy) parts of the super- +conducting dispersions are shown. (d) Length of the Fermi +arc l(φ) as a function of temperature. The green arrows mark +Tc and Tp, and the dashed line is a guide to the eye. The inset +shows Lorentzian fit of the angle-dependent spectral function +A(φ, 0) on the Fermi surface. (e) Intensity plot of the spectral +function A(k, 0) at zero energy in the first Brillouin zone for +different temperatures, showing gradual development of the +Fermi arc. +where f is the Fermi distribution function. As shown in +Figs. 4(d) and 4(e), the resulting S(φ, T ) and γ(φ, T ) +exhibit similar temperature and angle dependence as +A(φ, 0) and dA(φ, 0)/dT in Fig. 3(d), which agree well +with the entropy reduction and specific heat anisotropy +reported in latest ARPES experiment [31]. +To further compare with experiment [31], Fig. 
5(a) +plots the energy-momentum dependent spectral function + +(a) +(b +2.4 +1 +1.2 +0.5 +T=0.022 +T=0.022 +1.0 +1 +T=0.070 +0 +0.5 +T=0.098 +T=0.070 +0 +0.8 +-0.5 +0.4 +T=0.098 +-1.0 +-1.0 +-0.5 +0 +0.5 +1.0 +-1.0 +-0.5 +0 +0.5 +1.0 +K +E +y +(c) +(d) +1.5 +T=0.010 +0 +T=0.034 +8 +T=0.066 +8 +T=0.086 +A2 +0 +0.5 +1.0 +0.5 +near antinode +near node +T=0.066 +T=0.066 +-0.8 +T +T=0.034 + T=0.034 +p +0.4 +0.5 +0.6 +0.01 +0.05 +0.09 +k +T +y +2 +3 +0 +1 +0 +0.3 +0.7 +4 +1.1 +1.4 0 +0.2 +0.4 +0.6 +0.8 +(e) +T +T=0.018 +T=0.058 +T=0.090 +-T +0 +0 +- +T-T +T-T +0 +T +k +k +k0.030 +0.054 +0.078 +T +(a) +Φ=0.74 +(mΦ)v +0 +0 +0 +0 +1 +0 +3 +3 +3 +(b) +(c) +e Φ=0.15 +△A, T=0.010 +Φ=0.31 +△, T=0.050 +0.6 +0.6 +Φ=0.46 +T, T=0.050 +Φ=0.62 +(Φ, +Q +0.4 +Q +0.2 +△0.2 +0 +0.02 +0.04 r +T 0.06 +0.08 +0.1 +0 +0.5 +1 +1.5 +Φ +(p) +(e) +1.2 +Φ=0.15 +0.14 +Φ=0.15 +Φ=0.23 +Φ=0.23 +74) +母 Φ=0.31 +Φ Φ=0.31 +0.8 +0.10 +0.4 +0.06 +S +d +0.03 +0.05 +0.07 +0.09 +0.04 +0.06 +0.08 +T +T6 +A(ky, E) at fixed kx = −3.047, which allows us to ex- +tract the energy of the maxima for each ky. The resulting +dispersions are shown in Fig. 5(b) for T = 0.098, 0.07, +0.022. We see the dispersion exhibits back bending even +for T = 0.07 > Tc but almost recovers the normal state +one for T = 0.098 > Tp. The vector kG where the bend- +ing occurs is the same as the Fermi vector kF = ±0.4712 +(the grey vertical line), which differs from the predic- +tion based on density wave or magnetic order pictures. +The extracted dispersion also manifests anisotropy due +to phase fluctuations. In Fig. 5(c), the dispersion near +kF (the grey vertical line) shows angle dependent gap +at T = 0.034, but a clear node-antinode dichotomy at +T = 0.066, with the near-node dispersion crossing the +Fermi energy and the near-antinode dispersion exhibit- +ing a gap and back bending, as reported previously in +underdoped experiments [15]. +The effect of the phase correlation is also reflected in +the topology of the Fermi surface. As shown in the in- +set of Fig. 5(d), the angle-dependent spectral function +A(φ, 0) is gradually suppressed away from the nodal point +with lowering temperature. This leads to a variation of +the Fermi arc [69–71], whose length l(φ), estimated from +the 0.6-maximum width of the spectral peak, is plotted +in Fig. 5(d) as a function of temperature. We see l(φ) +almost saturates below Tc, increases linearly with tem- +perature in the intermediate region, and reaches a full +length (Fermi surface) at high temperatures. This con- +firms its connection with the phase correlation identified +using the phase mutual information. Such a temperature +variation of the Fermi arc length has been observed in +scanning tunneling spectroscopy (STS) experiment [20], +implying that the zero arc length reported in the ARPES +experiments [72] might originate from the peaks of the ar- +tificially symmetrized A(φ, ω). To be specific, Fig. 5(e) +maps out the zero-energy spectral function A(k, 0) in the +first Brillouin zone and we see a clear evolution from the +Fermi arc at T = 0.058 to the Fermi surface T = 0.09. +This variation indicates that the Bogoliubov quasiparti- +cle appears at different temperatures in different regions +of the Fermi surfaces. The arc is more broadened close +to the node, consistent with previous experiment [73]. +C. 
The superconducting phase diagram and a +strong-coupling plaquette state +Having identified the different regions of phase correla- +tions at a fixed V , we now turn to their variation with the +pairing interaction. As shown in Figs. 6(a) and 6(b), the +range of exponential temperature dependence also varies. +As V increases, the curves first move to higher tempera- +tures, but then shift somewhat backwards. Such a non- +monotonic variation is better seen in Figs. 6(c) and 6(d), +where the phase mutual information Ix +(1,0) and Ix +(5,5) are +replotted as a function of V for different temperatures. +FIG. 6: Comparison of the short- and long-range phase mu- +tual information with R = (1, 0) and (5, 5) (a)(b) as func- +tions of temperature for different pairing interactions, and +(c)(d) as functions of the pairing interactions for different tem- +peratures. (e) The superconducting phase diagram with Tc +and Tp determined from the phase mutual information and +T2 from the onset of two-peak amplitude distribution. +(f) +Comparison of the condensation energy Eg for the uniform +mean-field solution and the static auxiliary field Monte Carlo +(SAF-MC) solution. Also shown in the variance of the ampli- +tude distribution var(|∆|) from the Monte Carlo simulations +at T = 0.001. +Both exhibit nonmonotonic behavior with increasing V +at low temperatures, indicating that the phase correla- +tions are suppressed when the pairing interaction is get- +ting too large. As we will see, this is closely associated +with the two-peak structure of the amplitude distribution +in Fig. 1(d). +Taken together, a superconducting phase diagram can +be constructed and shown in Fig. 6(e), where both Tc +and Tp behave nonmonotonically with V . Also shown is +a third temperature scale T2, below which the amplitude +distribution has two peaks. +T2 only appears for suffi- +ciently large V , indicating a strong coupling limit whose +nature will be clarified later. Interestingly, we see that + +(a) +(b) +10 +0 +0 V=0.9 ← V=1.3 +e V=0.9 →V=1.3 +10° +V=2.1 AV=3.3 +V=2.1 A V=3.3 +V=5.3 +V=5.3 += +(0') +(5,5) +X +Q +2 +10° +Q +Q +Q +00 +10 +0.01 0.03 0.05 0.07 0.09 0.11 +0.01 0.03 0.05 0.07 0.09 0.11 +T +T +(c) +(p) +(5,5) +X +0 T=0.030 +T=0.030 +T=0.050 +含T=0.050 +Φ- T=0.075 +Φ- T=0.075 +T=0.125 +T=0.125 +10 +4 +10 +0.5 +2.5 +4.5 +6.5 +0.5 +2.5 +4.5 +6.5 +V +V +(e) +(f) +0.10 +-20 +0.08 +0.6 +0000 +[△)) + uniform +0.06 +E +T +0 +DIBA +SAF-MC +-60 +0.04 +var(I) +p +0.2 +0.02 +2 +00 +0 +0.5 +2.5 +4.5 +6.5 +2 +4 +6 +8 +V7 +FIG. 7: +(a) Typical configurations of the pairing fields at +T = 0.001 for V = 0.5, 3.7, 6.1. The square size represents +their amplitude and the colors mark the sign of their phase +θ ∈ (−π, π]. (b) Evolution of the total density of state N(ω) +with pairing interactions at T = 0.025, showing a crossover +from V -shape to U-shape. +(c) Temperature dependence of +N(ω) at V = 4.9. The inset shows the joint distribution of θx +0 +and θy +0 at T = 0.02, indicating a d-wave form of the pairing +fields on the bonds in the plaquette state. +Tc takes its maximum near the critical V of the two-peak +distribution and is suppressed as T2 increases. This sug- +gests that the superconductivity is competing with this +strong coupling state. +To clarify this issue, we compare in Fig. 7(a) typical +Monte Carlo configurations of the pairing fields for weak, +intermediate, and strong V at T = 0.001. The size of +the square represents the amplitude |∆| and the color +denotes the sign of the phase θ ∈ (−π, π]. 
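Before walking through these configurations, a short sketch may help make the encoding concrete. It stores the x- and y-bond fields per site, as in Fig. 7(a), and evaluates two diagnostics used in this section: the d-wave phase relation θ^y ≈ θ^x ± π on each site and the one- versus two-peak structure of the amplitude distribution. This is an illustration only, not the authors' analysis code; the "uniform" and "plaquette" toy configurations are hand-built caricatures, not Monte Carlo data, and the bin count is arbitrary.

import numpy as np

L = 10
# delta_x[ix, iy] is the field on the bond from site (ix, iy) along +x,
# delta_y[ix, iy] on the bond along +y (periodic boundaries implied).

def toy_uniform_dwave(d0=0.3):
    return d0 * np.ones((L, L), complex), -d0 * np.ones((L, L), complex)

def toy_plaquette(strong=0.5, weak=0.05):
    dx = weak * np.ones((L, L), complex)
    dy = -weak * np.ones((L, L), complex)
    for ix in range(0, L, 2):                  # one strong d-wave plaquette per 2 x 2 cell
        for iy in range(0, L, 2):
            dx[ix, iy] = dx[ix, (iy + 1) % L] = strong
            dy[ix, iy] = dy[(ix + 1) % L, iy] = -strong
    return dx, dy

def diagnostics(dx, dy):
    dphase = np.angle(dx * np.conj(dy))        # theta^x - theta^y on every site; near +/- pi for d-wave bonds
    amps = np.abs(np.concatenate([dx.ravel(), dy.ravel()]))
    hist, edges = np.histogram(amps, bins=20, density=True)
    return dphase, hist, edges

for name, (dx, dy) in [("uniform d-wave", toy_uniform_dwave()),
                       ("charge-4e plaquette", toy_plaquette())]:
    dphase, hist, _ = diagnostics(dx, dy)
    print(name, "| mean |theta^x - theta^y| =", round(float(np.abs(dphase).mean()), 3),
          "| occupied |Delta| bins =", int((hist > 0).sum()))

For actual Monte Carlo configurations the amplitude histogram broadens into the one- or two-peak forms of Fig. 1(b), and the phase difference clusters near ±π rather than sitting exactly on it, as described in the text.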
+For weak +V = 0.5, the distribution on the lattice is random, re- +flecting that the system is not yet in a phase coherent +region (T > Tc). For intermediate V = 3.7, we find a +uniform distribution of the amplitude, while the phase +changes sign periodically and exhibits a d-wave pattern. +It is straightforward to identify this state as the uniform +d-wave superconductivity. For strong V = 6.1, the ampli- +tude distribution is no longer uniform but exhibits cluster +patterns. We call it a charge-4e d-wave plaquette state +since it is formed of local plaquettes [74] with 4 bonds +of large |∆| in a unit cell surrounded by weak bonds in +a 2 × 2 cell. The plaquette has the same sign structure +as the d-wave superconductivity. The whole state can +be regarded as weakly connected charge-4e plaquettes. +Clearly, this is not a phase separation and the two-peak +feature of the amplitude distribution is a reflection of the +special plaquette structure. This state breaks the trans- +lational invariance of the pairing fields, but keeps the +uniform distribution of the electron densities. It persists +to a very large V = 7.5, beyond which the bonds become +less correlated as t/V → 0. +To show that the plaquette state is stable over the uni- +form superconductivity, we calculate their condensation +energies using +Eg = +� +l +|ξl| + +� +⟨ij⟩ +2 ¯∆ij∆ij +V +− +� +l +Λl, +(7) +where l = 1, 2, · · · , N and ξl is the eigenvalue of the +non-interacting Hamiltonian. Figure 6(f) compares the +condensation energies of the mean-field uniform solution +and the Monte Carlo solution. For small V , we see they +are almost equal. But beyond the critical V of the pla- +quette state, the mean-field uniform solution has higher +energy than the Monte Carlo (plaquette) solution. +In +this region, the variance var(|∆|) of the amplitude dis- +tribution grows rapidly with increasing V , reflecting an +increasing difference between the strong and weak bonds. +The transition to the plaquete state may be detected +from the V-shape-to-U-shape change of the density of +state as shown in Fig. 7(b). +Figure 7(c) plots N(ω) +at V = 4.9 for different temperatures. +The plaquette +state melts as N(ω) changes from U-shape to V-shape +with increasing temperature. Note that a U-shaped curve +is typically ascribed to s-wave superconductivity. How- +ever, the plaquette state still exhibits d-wave bonds with +θx +0 = θy +0 ± π as shown in the inset of Fig. 7(c). Simi- +lar variation has been observed in STS measurement in +twisted trilayer graphene [58], where it was argued to +originate from two-particle bound states. In our simu- +lations, the four-particle plaquette state is more favored +with nearest-neighbour pairing interaction. +It has been suggested that strong attractive interaction +may always lead to phase separation [75–79]. It could +be that the pairing interaction for the plaquette state is +not yet strong enough. For sufficiently large V , we find +randomly distributed dimers and plaquettes, possibly be- +cause the pairing correlations are suppressed as t/V be- +comes too small. The plaquette state may be in some +sense related to pair density wave (PDW) [80–82]. But +our derived configuration is special. It does no induce +any charge density wave and may only be produced by +a complicated combination of uniform superconductivity +and bidirectional PDW states of the wave vector (0, π) +and (π, 0). It may thus be better viewed as a different +strong-coupling limit of the d-wave superconductivity. +IV. 
DISCUSSION AND CONCLUSIONS +We have applied the static auxiliary field Monte Carlo +method to study phase correlations of the superconduct- +ing pairing fields. We can reproduce the weak-coupling + +(a) +V=0.5 +V=3.7 +V=6.1 +10 +8 +9 +4 +2 +起 ++ ++ +1 +2 +4 +6 +8 +10 +2 +4 +6 +8 +10 +2 +4 +6 +8 +10 +x +X +X +(b) +(c) +T=0.02 +V=3.1 V=4.0 +1.8 +-V=4.8 V=6.8 +0.2 +1.0 +0.2 +(3)N +-1 +0.2 +(3)N +T=0.025 +-1 +1 +0.1 +0.1 +V=4.9 +0 +T +0.01 +0.06 +0.12 +0 +-2 +-1 +0 +1 +2 +-2 +-1 +0 +1 +2 +3 +38 +BCS solution of the mean-field theory and identify a re- +gion above Tc by separation between short- and long- +range phase correlations for moderate and strong pair- +ing interactions. This phase fluctuating region above the +uniform d-wave superconductivity has a number of spec- +troscopic features including the anisotropy of the angle- +resolved gap opening, scattering rate, and specific heat +coefficient, as well as gradual development of the Fermi +arc. The angular or momentum dependence of the gap +opening temperature may be a general feature of phase +fluctuations for all kinds of orders. For sufficiently strong +pairing interaction, our simulation reveals a competing +charge-4e plaquette state with d-wave-like bonds and a +U-shaped density of states. The superconducting transi- +tion temperature seems maximal near the critical pairing +interaction of the plaquette state, raising an interesting +question concerning their relationship. +It should be mentioned that we begin the calcula- +tions with an attractive spin-singlet pairing interaction. +This form of Hamiltonian can be derived naturally from +nearest-neighbor antiferromagnetic spin fluctuation in- +teraction, which has been argued to provide a holistic +picture for all cuprates [83]. +But the superconductiv- +ity may be suppressed by magnetic order if the electron +density is close to half filling. Here we only consider the +overdoped region, where the magnetic long-range order is +not important and we only have to deal with the auxiliary +pairing fields. Since onsite pairing is not supported due +to strong onsite Coulomb repulsion, it is reasonable to +consider the pairing fields only between nearest-neighbor +sites. The agreement of our results with spectroscopic +experiment confirms this spin fluctuation model in over- +doped cuprates. On the other hand, attractive charge +density interaction [84, 85] may also yield similar pair- +ing interaction. However, phenomenological interactions +of charge fluctuations typically give a positive nearest- +neighbour charge density interaction [59, 86]. +Hence, +more exotic forms are needed in order to explain cuprates +using charge interaction, which may be in conflict with +the X-ray experiment [87]. +It may be useful to compare our results of the uniform +superconductivity with the XY model which is believed +to describe the physics of two-dimensional superconduc- +tivity [10, 12, 88]. For this purpose, we have to first define +the superconducting order parameter on the lattice sites, +namely ∆i = 1 +4(∆i,i+x +∆i,i−x −∆i,i+y −∆i,i−y), where +∆i,i±x and ∆i,i±y are the pairing fields on the four bonds +connected to site i. +The number of vortices can then +be calculated using ∆i following the standard definition +[13] and found to be nearly zero below Tc, grow rapidly +between Tc and Tp and slowly above Tp, and eventually +saturate above Tl. 
The rapid increase above Tc is in good +correspondence with that predicted for the Berezinskii- +Kosterlitz-Thouless (BKT) transition due to the unbind- +ing of vortices and antivortices [89–91], indicating that +our Tc is exactly the BKT transition temperature. The +power law decay of the phase mutual information indi- +cates a quasi-long range order that does not break U(1) +symmetry conforming to the well-known Mermin-Wagner +theorem [92, 93]. +Our identification of three tempera- +ture scales and four distinct regions may offer some in- +sight into the triple transition in resistance experment +[27], where normal metal, pseudogap (incoherent metal), +phase fluctuation, and superconductivity are separated. +A similar scenario may also be related to the transition +between superconductivity and normal metal, where dis- +order or magnetic field may broaden the transition and +lead to one or two intermediate regions [94, 95]. +Superconducting phase fluctuations also play an im- +portant role in other superconductors, such as Fe-based +superconductors [96–99] and disordered conventional su- +perconductors [100–106]. Our method may also provide +useful insight into the interplay between phase fluctua- +tions and other important effects such as disorder, multi- +band, and time reversal symmetry breaking in these sys- +tems. +This work was supported by the National Natural Sci- +ence Foundation of China (NSFC Grants No. 11974397, +No. 12174429, and No. 12204075), the National Key Re- +search and Development Program of China (Grant No. +2022YFA1402203), and the Strategic Priority Research +Program of the Chinese Academy of Sciences (Grant No. +XDB33010100). +∗ yifeng@iphy.ac.cn +[1] V. J. Emery and S. A. Kivelson, Importance of Phase +Fluctuations in Superconductors with Small Superfluid +Density, Nature 374, 434 (1995). +[2] M. Franz and A. Millis, Phase Fluctuations and Spectral +Properties of Underdoped Cuprates, Phys. Rev. B 58, +14572 (1998). +[3] H. J. Kwon, A. T. Dorsey, and P. J. Hirschfeld, Ob- +servability of Quantum Phase Fluctuations in Cuprate +Superconductors, Phys. Rev. Lett. 86, 3875 (2001). +[4] K. V. Samokhin and B. Mitrovi´c, Nodal Quasiparticles +and Classical Phase Fluctuations in d-Wave Supercon- +ductors, Phys. Rev. Lett. 92, 057002 (2004). +[5] M. R. Norman, A. Kanigel, M. Randeria, U. Chatter- +jee, and J. C. Campuzano, Modeling the Fermi Arc in +Underdoped Cuprates, Phys. Rev. B 76, 174501 (2007). +[6] E. Berg and E. Altman, Evolution of the Fermi Surface +of d-Wave Superconductors in the Presence of Thermal +Phase Fluctuations, Phys. Rev. Lett. 99, 247001 (2007). +[7] Z. Teˇsanovi´c, d-Wave Duality and Its Reflections in +High-Temperature Superconductors, Nat. Phys. 4, 408 +(2008). +[8] S. Banerjee, T. V. Ramakrishnan, and C. Dasgupta, +Effect of Pairing Fluctuations on Low-Energy Electronic +Spectra in Cuprate Superconductors, Phys. Rev. B 84, +144525 (2011). +[9] A. Allais, D. Chowdhury, and S. Sachdev, Connecting +High-Field Quantum Oscillations to Zero-Field Electron +Spectral Functions in the Underdoped Cuprates, Nat. + +9 +Commun. 5, 5771 (2014). +[10] T. Eckl, D. J. Scalapino, E. Arrigoni, and W. Hanke, +Pair Phase Fluctuations and the Pseudogap, Phys. Rev. +B 66, 140510(R) (2002). +[11] M. Mayr, G. Alvarez, C. S¸en, and E. Dagotto, Phase +Fluctuations in Strongly Coupled d-Wave Superconduc- +tors, Phys. Rev. Lett. 94, 217001 (2005). +[12] Q. Han, T. Li, and Z. D. Wang, Pseudogap and Fermi- +Arc Evolution in the Phase-Fluctuation Scenario, Phys. +Rev. 
B 82, 052503 (2010). +[13] Y. W. Zhong, T. Li, and Q. Han, Monte Carlo Study +of Thermal Fluctuations and Fermi-Arc Formation in d- +Wave Superconductors, Phys. Rev. B 84, 024522 (2011). +[14] D. K. Singh, S. Kadge, Y. Bang, and P. Majumdar, +Fermi Arcs and Pseudogap Phase in a Minimal Micro- +scopic Model of d-Wave Superconductivity, Phys. Rev. +B 105, 054501 (2022). +[15] A. Kanigel, U. Chatterjee, M. Randeria, M. R. Nor- +man, G. Koren, K. Kadowaki, and J. C. Campuzano, +Evidence for Pairing above the Transition Temperature +of Cuprate Superconductors from the Electronic Dis- +persion in the Pseudogap Phase, Phys. Rev. Lett. 101, +137002 (2008). +[16] H. Ding, T. Yokoya, J. C. Campuzano, T. Takahashi, +M. Randeria, M. R. Norman, T. Mochiku, K. Kadowaki, +and J. Giapintzakis, Spectroscopic Evidence for a Pseu- +dogap in the Normal State of Underdoped High-Tc Su- +perconductors, Nature 382, 51 (1996). +[17] L. Li, Y. Wang, S. Komiya, S. Ono, Y. Ando, G. D. +Gu, and N. P. Ong, Diamagnetism and Cooper Pairing +above Tc in Cuprates, Phys. Rev. B 81, 054510 (2010). +[18] K. K. Gomes, A. N. Pasupathy, A. Pushp, S. Ono, +Y. Ando, and A. Yazdani, Visualizing Pair Formation +on the Atomic Scale in the High-Tc Superconductor +Bi2Sr2CaCu2O8+δ, Nature 447, 569 (2007). +[19] H. B. Yang, J. D. Rameau, P. D. Johnson, T. Valla, +A. Tsvelik, and G. D. Gu, Emergence of Preformed +Cooper Pairs from the Doped Mott Insulating State in +Bi2Sr2CaCu2O8+δ, Nature 456, 77 (2008). +[20] J. Lee, K. Fujita, A. R. Schmidt, C. K. Kim, H. Eisaki, +S. Uchida, and J. C. Davis, Spectroscopic Fingerprint of +Phase-Incoherent Superconductivity in the Underdoped +Bi2Sr2CaCu2O8+δ, Science 325, 1099 (2009). +[21] L. S. Bilbro, R. V. Aguilar, G. Logvenov, O. Pelleg, I. +Boˇzovi´c, and N. P. Armitage, Temporal Correlations of +Superconductivity above the Transition Temperature in +La2−xSrxCuO4 Probed by Terahertz Spectroscopy, Nat. +Phys. 7, 298 (2011). +[22] P. Zhou, L. Chen, Y. Liu, I. Sochnikov, A. T. Bollinger, +M. G. Han, Y. Zhu, X. He, I. Boˇzovi´c, and D. Natelson, +Electron Pairing in the Pseudogap State Revealed by +Shot Noise in Copper Oxide Junctions, Nature 572, 493 +(2019). +[23] T. Kondo, Y. Hamaya, A. D. Palczewski, T. Takeuchi, +J. S. Wen, Z. J. Xu, G. Gu, J. Schmalian, and A. +Kaminski, Disentangling Cooper-Pair Formation above +the Transition Temperature from the Pseudogap State +in the Cuprates, Nat. Phys. 7, 21 (2011). +[24] R. H. He, M. Hashimoto, H. Karapetyan, J. D. Koralek, +J. P. Hinton, J. P. Testaud, V. Nathan, Y. Yoshida, H. +Yao, K. Tanaka, W. Meevasana, R. G. Moore, D. H. +Lu, S. K. Mo, M. Ishikado, H. Eisaki, Z. Hussain, T. P. +Devereaux, S. A. Kivelson, J. Orenstein, A. Kapitulnik, +and Z. X. Shen, From a Single-Band Metal to a High- +Temperature Superconductor via Two Thermal Phase +Transitions, Science 331, 1579 (2011). +[25] A. Kaminski, S. Rosenkranz, H. M. Fretwell, J. C. Cam- +puzano, Z. Li, H. Raffy, W. G. Cullen, H. You, C. G. Ol- +son, C. M. Varma, and H. H¨ochst, Spontaneous Break- +ing of Time-Reversal Symmetry in the Pseudogap State +of a High-Tc Superconductor, Nature 416, 610 (2002). +[26] N. K. Gupta, C. McMahon, R. Sutarto, T. Shi, R. Gong, +H. I. Wei, K. M. Shen, F. He, Q. Ma, M. Dragomir, B. +D. Gaulin, and D. G. Hawthorn, Vanishing Nematic Or- +der beyond the Pseudogap Phase in Overdoped Cuprate +Superconductors, Proc. Natl. Acad. Sci. U. S. A. 118, +e2106881118 (2021). +[27] P. +M. +C. +Rourke, +I. +Mouzopoulou, +X. +Xu, +C. +Panagopoulos, Y. Wang, B. Vignolle, C. Proust, E. V. 
+Kurganova, U. Zeitler, Y. Tanabe, T. Adachi, Y. Koike, +and N. E. Hussey, Phase-Fluctuating Superconductivity +in Overdoped La2−xSrxCuO4, Nat. Phys. 7, 455 (2011). +[28] I. Bozovic, X. He, J. Wu, and A. T. Bollinger, Depen- +dence of the Critical Temperature in Overdoped Copper +Oxides on Superfluid Density, Nature 536, 309 (2016). +[29] F. Mahmood, X. He, I. BoˇZovi´c, and N. P. Armitage, +Locating the Missing Superconducting Electrons in the +Overdoped Cuprates La2−xSrxCuO4, Phys. Rev. Lett. +122, 027003 (2019). +[30] Y. He, S. Di Chen, Z. X. Li, D. Zhao, D. Song, Y. +Yoshida, H. Eisaki, T. Wu, X. H. Chen, D. H. Lu, +C. Meingast, T. P. Devereaux, R. J. Birgeneau, M. +Hashimoto, D. H. Lee, and Z. X. Shen, Superconducting +Fluctuations in Overdoped Bi2Sr2CaCu2O8+δ, Phys. +Rev. X 11, 031068 (2021). +[31] S. Di Chen, M. Hashimoto, Y. He, D. Song, J. F. He, Y. +F. Li, S. Ishida, H. Eisaki, J. Zaanen, T. P. Devereaux, +D. H. Lee, D. H. Lu, and Z. X. Shen, Unconventional +Spectral Signature of Tc in a Pure d-Wave Supercon- +ductor, Nature 601, 562 (2022). +[32] C. Zou, Z. Hao, X. Luo, S. Ye, Q. Gao, M. Xu, +X. Li, P. Cai, C. Lin, X. Zhou, D.-H. Lee, and Y. +Wang, Particle–Hole Asymmetric Superconducting Co- +herence Peaks in Overdoped Cuprates, Nat. Phys. 18, +551 (2022). +[33] D. Wang, J. Xu, H. Zhang, and Q. Wang, Anisotropic +Scattering Caused by Apical Oxygen Vacancies in Thin +Films of Overdoped High-Temperature Cuprate Super- +conductors, Phys. Rev. Lett. 128, 137001 (2022). +[34] Z. X. Li, S. A. Kivelson, and D. H. Lee, Superconductor- +to-Metal Transition in Overdoped Cuprates, npj Quan- +tum Mater. 6, 36 (2021). +[35] W. W´ei, W. Xiang, and T. Andr´e-Marie, Non-Fermi +Liquid Phase and Linear-in-Temperature Scattering +Rate in Overdoped Two-Dimensional Hubbard Model, +Proc. Natl. Acad. Sci. 119, e2115819119 (2022). +[36] J.-J. Dong, D. Huang, and Y. Yang, Mutual Informa- +tion, Quantum Phase Transition and Phase Coherence +in Kondo Systems, Phys. Rev. B 104, L081115 (2021). +[37] A. Mukherjee, N. D. Patel, S. Dong, S. Johnston, A. +Moreo, and E. Dagotto, Testing the Monte Carlo-Mean +Field Approximation in the One-Band Hubbard Model, +Phys. Rev. B 90, 205113 (2014). +[38] S. Liang, A. Moreo, and E. Dagotto, Nematic State of +Pnictides Stabilized by Interplay between Spin, Orbital, +and Lattice Degrees of Freedom, Phys. Rev. Lett. 111, +047004 (2013). +[39] Y. Dubi, Y. Meir, and Y. Avishai, Nature of the + +10 +Superconductor-Insulator Transition in Disordered Su- +perconductors, Nature 449, 876 (2007). +[40] K. Pasrija, P. B. Chakraborty, and S. Kumar, Effective +Hamiltonian Based Monte Carlo for the BCS to BEC +Crossover in the Attractive Hubbard Model, Phys. Rev. +B 94, 165150 (2016). +[41] M. Karmakar, Pauli Limited d-Wave Superconductors: +Quantum Breached Pair Phase and Thermal Transi- +tions, J. Phys. Condens. Matter 32, 405604 (2020). +[42] T. M. Cover and J. A. Thomas, Elements of Information +Theory (Wiley Series in Telecommunications and Signal +Processing) (Wiley-Interscience, USA, 2006). +[43] A. Kraskov, H. St¨ogbauer, and P. Grassberger, Esti- +mating mutual information, Phys. Rev. E 69, 066138 +(2004). +[44] M. K. Varanasi, Estimation of the Information by +an Adaptive Partitioning of the Observation Space +Georges, IEEE Trans. Inf. Theory 45, 1315 (1999). +[45] S. Gao, G. Ver Steeg, and A. 
Galstyan, Efficient Esti- +mation of Mutual Information for Strongly Dependent +Variables, Proceedings of the 18th International Con- +ference on Artificial Intelligence and Statistics, 38, 277 +(2015). +[46] S. Khan, S. Bandyopadhyay, A. R. Ganguly, S. Saigal, +D. J. Erickson, V. Protopopescu, and G. Ostrouchov, +Relative Performance of Mutual Information Estima- +tion Methods for Quantifying the Dependence among +Short and Noisy Data, Phys. Rev. E 76, 026209 (2007). +[47] M. I. Belghazi, A. Baratin, S. Rajeswar, S. Ozair, Y. +Bengio, A. Courville, and R. D. Hjelm, Mutual Informa- +tion Neural Estimation, Proceedings of the 35th Inter- +national Conference on Machine Learning, PMLR 80, +531 (2018). +[48] B. Poole, S. Ozair, A. V. D. Oord, A. A. Alemi, and +G. Tucker, On Variational Bounds of Mutual Informa- +tion, Proceedings of the 36th International Conference +on Machine Learning, PMLR 97, 5171 (2019). +[49] T. Speed, A Correlation for the 21st Century (Science +(2011) (1502)), Science 334, 1502 (2012). +[50] D. Reshef, Y. Reshef, H. Finucane, S. Grossman, G. +Mcvean, P. Turnbaugh, E. Lander, M. Mitzenmacher, +and P. Sabeti, Detecting Novel Associations in Large +Data Sets, Science 334, 1518 (2011). +[51] J. B. Kinney and G. S. Atwal, Equitability, Mutual +Information, and the Maximal Information Coefficient, +Proc. Natl. Acad. Sci. U. S. A. 111, 3354 (2014). +[52] M. Koch-Janusz and Z. Ringel, Mutual Information, +Neural Networks and the Renormalization Group, Nat. +Phys. 14, 578 (2018). +[53] A. Nir, E. Sela, R. Beck, and Y. Bar-Sinai, Machine- +Learning Iterative Calculation of Entropy for Physical +Systems, Proc. Natl. Acad. Sci. U. S. A. 117, 30234 +(2020). +[54] D. E. G¨okmen, Z. Ringel, +S. D. Huber, and M. +Koch-Janusz, Statistical Physics through the Lens of +Real-Space Mutual Information, Phys. Rev. Lett. 127, +240603 (2021). +[55] F. Parisen Toldin, T. Sato, and F. F. Assaad, Mutual +Information in Heavy-Fermion Systems, Phys. Rev. B +99, 155158 (2019). +[56] C. Walsh, M. Charlebois, P. S´emon, G. Sordi, and A. M. +S. Tremblay, Information-Theoretic Measures of Super- +conductivity in a Two-Dimensional Doped Mott Insu- +lator, Proc. Natl. Acad. Sci. U. S. A. 118, e2104114118 +(2021). +[57] G. Nicoletti and D. M. Busiello, Mutual Information +Disentangles Interactions from Changing Environments, +Phys. Rev. Lett. 127, 228301 (2021). +[58] H. Kim, Y. Choi, C. Lewandowski, A. Thomson, Y. +Zhang, R. Polski, K. Watanabe, T. Taniguchi, J. Al- +icea, and S. Nadj-Perge, Evidence for Unconventional +Superconductivity in Twisted Trilayer Graphene, Na- +ture 606, 494 (2022). +[59] P. Monthoux, D. Pines, and G. G. Lonzarich, Supercon- +ductivity without Phonons, Nature 450, 1177 (2007). +[60] P. Coleman, Introduction to Many-body Physics, (Cam- +bridge University Press, Cambridge, England, 2015). +[61] S. Kumar and P. Majumdar, A Travelling Cluster Ap- +proximation for Lattice Fermions Strongly Coupled to +Classical Degrees of Freedom, Eur. Phys. J. B 50, 571 +(2006). +[62] A. Mukherjee, N. D. Patel, C. Bishop, and E. Dagotto, +Parallelized Traveling Cluster Approximation to Study +Numerically Spin-Fermion Models on Large Lattices, +Phys. Rev. E 91, 063303 (2015). +[63] J. P. Carbotte, Properties of a Two-Dimensional D- +Wave Superconductor from Phenomenological Suscep- +tibility, Phys. Rev. B 49, 4176 (1994). +[64] P. Monthoux and G. G. Lonzarich, Magnetically Me- +diated Superconductivity in Quasi-Two and Three Di- +mensions, Phys. Rev. B 63, 054529 (2001). +[65] M. Plat´e, J. D. F. 
Mottershead, I. S. Elfimov, D. C. +Peets, R. Liang, D. A. Bonn, W. N. Hardy, S. Chi- +uzbaian, M. Falub, M. Shi, L. Patthey, and A. Dam- +ascelli, Fermi Surface and Quasiparticle Excitations of +Overdoped Tl2Ba2CuO6+δ, Phys. Rev. Lett. 95, 077001 +(2005). +[66] B. Vignolle, A. Carrington, R. A. Cooper, M. M. J. +French, A. P. Mackenzie, C. Jaudet, D. Vignolles, C. +Proust, and N. E. Hussey, Quantum Oscillations in an +Overdoped High-Tc Superconductor, Nature 455, 952 +(2008). +[67] P. Monthoux, A. V Balatsky, and D. Pines, Toward +a Theory of High-Temperature Superconductivity in +the Antiferromagnetically Correlated Cuprate Oxides, +Phys. Rev. Lett. 67, 3448 (1991). +[68] J. Li, C. Cheng, T. Paiva, H.-Q. Lin, and R. Mondaini, +Giant Magnetoresistance in Hubbard Chains, Phys. +Rev. Lett. 121, 020403 (2018). +[69] N. Harrison, R. D. McDonald, and J. Singleton, Cuprate +Fermi Orbits and Fermi Arcs: +The Effect of Short- +Range Antiferromagnetic Order, Phys. Rev. Lett. 99, +206406 (2007). +[70] G. Alvarez and E. Dagotto, Fermi Arcs in the Supercon- +ducting Clustered State for Underdoped Cuprate Super- +conductors, Phys. Rev. Lett. 101, 177001 (2008). +[71] A. Greco, Evidence for Two Competing Order Param- +eters in Underdoped Cuprate Superconductors from a +Model Analysis of Fermi-Arc Effects, Phys. Rev. Lett. +103, 217001 (2009). +[72] A. Kanigel, M. R. Norman, M. Randeria, U. Chatterjee, +S. Souma, A. Kaminski, H. M. Fretwell, S. Rosenkranz, +M. Shi, T. Sato, T. Takahashi, Z. Z. Li, H. Raffy, K. +Kadowaki, D. Hinks, L. Ozyuzer, and J. C. Campuzano, +Evolution of the Pseudogap from Fermi Arcs to the +Nodal Liquid, Nat. Phys. 2, 447 (2006). +[73] T. J. Reber, N. C. Plumb, Z. Sun, Y. Cao, Q. Wang, K. +McElroy, H. Iwasawa, M. Arita, J. S. Wen, Z. J. Xu, G. + +11 +Gu, Y. Yoshida, H. Eisaki, Y. Aiura, and D. S. Dessau, +The Origin and Non-Quasiparticle Nature of Fermi Arcs +in Bi2Sr2CaCu2O8+δ, Nat. Phys. 8, 606 (2012). +[74] M. Danilov, E. G. C. P. van Loon, S. Brener, S. Iskakov, +M. I. Katsnelson, and A. I. Lichtenstein, Npj Quantum +Mater. 7, 50 (2022). +[75] A. Nazarenko, A. Moreo, E. Dagotto, and J. Riera, +Superconductivity in a Model of Correlated Fermions, +Phys. Rev. B 54, R768 (1996). +[76] M. Shaw and W. P. Su, Phase Separation Due to +Nearest Neighbor Attractive Interactions in a Two- +Dimensional Model, Mod. Phys. Lett. B 17, 853 (2003). +[77] W. P. Su, Phase Separation and d-Wave Superconduc- +tivity in a Two-Dimensional Extended Hubbard Model +with Nearest-Neighbor Attractive Interaction, Phys. +Rev. B 69, 012506 (2004). +[78] V. J. Emery, Phase Separation in t-J model, Phys. Rev. +Lett. 64, 475 (1990). +[79] M. D. Cookson and P. M. R. Stirk, Indications of dx2−y2 +Superconductivity in the Two Dimensional t-J Model, +Phys. Rev. Lett. 70, 682 (1993). +[80] P. A. Lee, Amperean Pairing and the Pseudogap Phase +of Cuprate Superconductors, Phys. Rev. X 4, 031017 +(2014). +[81] C. Setty, L. Fanfarillo, and P. J. Hirschfeld, Micro- +scopic Mechanism for Fluctuating Pair Density Wave, +arXiv:2110.13138 (2021). +[82] C. Setty, J. Zhao, L. Fanfarillo, E. W. Huang, P. J. +Hirschfeld, P. W. Phillips, and K. Yang, Exact Solution +for Finite Center-of-Mass Momentum Cooper Pairing, +arXiv:2209.10568(2022). +[83] Z. Chen, Y. Wang, S. N. Rebec, T. Jia, M. Hashimoto, +D. Lu, B. Moritz, R. G. Moore, T. P. Devereaux, and Z. +X. Shen, Anomalously Strong Near-Neighbor Attraction +in Doped 1D Cuprate Chains, Science 373, 1235 (2021). +[84] N. Plonka, C. J. Jia, Y. Wang, B. Moritz, and T. +P. 
Devereaux, Fidelity Study of Superconductivity in +Extended Hubbard Models, Phys. Rev. B 92, 024503 +(2015). +[85] M. Jiang, Enhancing d-Wave Superconductivity with +Nearest-Neighbor Attraction in the Extended Hubbard +Model, Phys. Rev. B 105, 024510 (2022). +[86] P. Monthoux and G. G. Lonzarich, Density-Fluctuation- +Mediated Superconductivity, Phys. Rev. B 69, 064517 +(2004). +[87] H. H. Kim, E. Lefran¸cois, K. Kummer, R. Fumagalli, N. +B. Brookes, D. Betto, S. Nakata, M. Tortora, J. Porras, +T. Loew, M. E. Barber, L. Braicovich, A. P. Mackenzie, +C. W. Hicks, B. Keimer, M. Minola, and M. Le Tacon, +Charge Density Waves in YBa2Cu3 O6.67 Probed by +Resonant X-Ray Scattering under Uniaxial Compres- +sion, Phys. Rev. Lett. 126, 37002 (2021). +[88] A. Paramekanti, M. Randeria, T. V. Ramakrishnan, +and S. S. Mandal, Effective Actions and Phase Fluc- +tuations in d-Wave Superconductors, Phys. Rev. B 62, +6786 (2000). +[89] V. L. Berezinskii, Destruction of Long-Range Order in +One-Dimensional and Two-Dimensional Systems Pos- +sessing a Continuous Symmetry Group. II. Quantum +Systems, Sov. Phys. JETP 34, 610 (1972). +[90] J. M. Kosterlitz and D. J. Thouless, Ordering, Metasta- +bility and Phase Transitions in Two-Dimensional Sys- +tems, J. Phys. C Solid State Phys. 6, 1181 (1973). +[91] J. M. Kosterlitz, The Critical Properties of the Two- +Dimensional XY Model, J. Phys. C Solid State Phys. 7, +1046 (1974). +[92] N. D. Mermin, Crystalline Order in Two Dimensions, +Phys. Rev. 176, 250 (1968). +[93] P. C. Hohenberg, Existence of Long-Range Order in One +and Two Dimensions, Phys. Rev. 158, 383 (1967). +[94] B. Spivak, P. Oreto, and S. A. Kivelson, Theory +of Quantum Metal to Superconductor Transitions in +Highly Conducting Systems, Phys. Rev. B 77, 214523 +(2008). +[95] A. Kapitulnik, S. A. Kivelson, and B. Spivak, Collo- +quium: +Anomalous Metals: +Failed Superconductors, +Rev. Mod. Phys. 91, 11002 (2019). +[96] Y. Xu, H. Rong, Q. Wang, D. Wu, Y. Hu, Y. Cai, Q. +Gao, H. Yan, C. Li, C. Yin, H. Chen, J. Huang, Z. Zhu, +Y. Huang, G. Liu, Z. Xu, L. Zhao, and X. J. Zhou, Spec- +troscopic Evidence of Superconductivity Pairing at 83 +K in Single-Layer FeSe/SrTiO3 Films, Nat. Commun. +12, 2840 (2021). +[97] B. D. Faeth, S. L. Yang, J. K. Kawasaki, J. N. Nelson, +P. Mishra, C. T. Parzyck, C. Li, D. G. Schlom, and +K. M. Shen, Incoherent Cooper Pairing and Pseudogap +Behavior in Single-Layer FeSe/SrTiO3, Phys. Rev. X +11, 021054 (2021). +[98] B. L. Kang, M. Z. Shi, S. J. Li, H. H. Wang, Q. Zhang, +D. Zhao, J. Li, D. W. Song, L. X. Zheng, L. P. Nie, +T. Wu, and X. H. Chen, Preformed Cooper Pairs in +Layered FeSe-Based Superconductors, Phys. Rev. Lett. +125, 097003 (2020). +[99] V. Grinenko, D. Weston, F. Caglieris, C. Wuttke, C. +Hess, T. Gottschall, I. Maccari, D. Gorbunov, S. Zher- +litsyn, J. Wosnitza, A. Rydh, K. Kihou, C.-H. Lee, R. +Sarkar, S. Dengre, J. Garaud, A. Charnukha, R. H¨uhne, +K. Nielsch, B. B¨uchner, H.-H. Klauss, and E. Babaev, +State with Spontaneously Broken Time-Reversal Sym- +metry above the Superconducting Phase Transition, +Nat. Phys. 17, 1254 (2021). +[100] T. Dubouchet, B. Sac´ep´e, J. Seidemann, D. Shahar, +M. Sanquer, and C. Chapelier, Collective Energy Gap +of Preformed Cooper Pairs in Disordered Superconduc- +tors, Nat. Phys. 15, 233 (2019). +[101] K. M. Bastiaans, D. Chatzopoulos, J.-F. Ge, D. Cho, W. +O. Tromp, J. M. van Ruitenbeek, M. H. Fischer, P. J. de +Visser, D. J. Thoen, E. F. C. Driessen, T. M. Klapwijk, +and M. P. 
Allan, Direct Evidence for Cooper Pairing +without a Spectral Gap in a Disordered Superconductor +above TC, Science 374, 608 (2021). +[102] M. Mondal, A. Kamlapure, M. Chand, G. Saraswat, S. +Kumar, J. Jesudasan, L. Benfatto, V. Tripathi, and P. +Raychaudhuri, Phase Fluctuations in a Strongly Disor- +dered S-Wave Nbn Superconductor Close to the Metal- +Insulator Transition, Phys. Rev. Lett. 106, 047001 +(2011). +[103] Z. Chen, A. G. Swartz, H. Yoon, H. Inoue, T. A. +Merz, D. Lu, Y. Xie, H. Yuan, Y. Hikita, S. Raghu, +and H. Y. Hwang, Carrier Density and Disorder Tuned +Superconductor-Metal Transition in a Two-Dimensional +Electron System, Nat. Commun. 9, 4008 (2018). +[104] K. Bouadim, Y. L. Loh, M. Randeria, and N. Trivedi, +Single- and Two-Particle +Energy Gaps across the +Disorder-Driven Superconductor-Insulator Transition, +Nat. Phys. 7, 884 (2011). +[105] A. Ghosal, M. Randeria, and N. Trivedi, Role of Spatial +Amplitude Fluctuations in Highly Disordered s-Wave + +12 +Superconductors, Phys. Rev. Lett. 81, 3940 (1998). +[106] Q. Cui and K. Yang, Fulde-Ferrell-Larkin-Ovchinnikov +State in Disordered s-Wave Superconductors, Phys. +Rev. B 78, 054501 (2008). + diff --git a/jNFAT4oBgHgl3EQfaR1d/content/tmp_files/load_file.txt b/jNFAT4oBgHgl3EQfaR1d/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..a71fd20c20800f65eebfcc41a055b40271c89208 --- /dev/null +++ b/jNFAT4oBgHgl3EQfaR1d/content/tmp_files/load_file.txt @@ -0,0 +1,1734 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf,len=1733 +page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content='08550v1 [cond-mat.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content='supr-con] 20 Jan 2023 Superconducting fluctuations and charge-4e plaquette state at strong coupling Qiong Qin,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content='1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 2 Jian-Jun Dong,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content='3 Yutao Sheng,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content='1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 2 Dongchen Huang,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content='1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 2 and Yi-feng Yang1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 4,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' ∗ 1Beijing National Laboratory for Condensed Matter Physics and Institute of Physics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Chinese Academy of Sciences,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Beijing 100190,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' China 2University of Chinese Academy of Sciences,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Beijing 100049,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' China 3Department of Physics and Chongqing Key Laboratory for Strongly Coupled Physics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Chongqing University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Chongqing 401331,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' China 4Songshan Lake Materials Laboratory,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Dongguan,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Guangdong 523808,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' China (Dated: 
Recent experiments indicate that superconducting fluctuations also play an important role in overdoped cuprates. Here we apply the static auxiliary field Monte Carlo approach to study phase correlations of the pairing fields in a microscopic model with spin-singlet pairing interaction. We find that the short- and long-range phase correlations are well captured by the phase mutual information, which allows us to construct a theoretical phase diagram containing the uniform d-wave superconducting region, the phase fluctuating region, the local pairing region, and the disordered region. We show that the gradual development of phase coherence has a number of consequences for spectroscopic measurements, such as the development of the Fermi arc and the anisotropy in the angle-resolved spectra, scattering rate, entropy, specific heat, and quasiparticle dispersion, in good agreement with experimental observations. For strong coupling, our Monte Carlo simulation reveals an unexpected charge-4e plaquette state with d-wave bonds, which competes with the uniform d-wave superconductivity and exhibits a U-shaped density of states.

I. INTRODUCTION

Superconducting fluctuations have been proposed to play an important role in underdoped cuprates [1–14]. Their presence may be responsible for the back-bending bands above the superconducting transition temperature Tc [15], the continuous variation of the spectral gap across the transition [16], and probably the large Nernst effect and diamagnetic signals [17]. They have also been invoked to explain the mysterious pseudogap [18–22], but this is negated by some experiments showing that superconducting fluctuations exist only in a much narrower region than the pseudogap [23]. Their interplay with competing orders may be the cause of the particle-hole asymmetry [24], time-reversal symmetry breaking [24, 25], or rotational symmetry breaking [26] observed in some materials.
In overdoped cuprates, however, superconducting fluctuations have scarcely been considered seriously [27], although a linear proportionality between the superfluid density and Tc has been reported and indicates a key role of phase stiffness in controlling the superconductivity [28, 29]. Very recently, the angle-resolved photoemission spectroscopy (ARPES) observation of a d-wave gap and a particle-hole symmetric dispersion above Tc in overdoped Bi2Sr2CaCu2O8+δ [30–32] has stimulated intensive debates concerning the existence of phase fluctuations in overdoped cuprates and whether the observed anomalous properties are due to superconducting fluctuations or involve other mechanisms such as anisotropic impurity scattering [33].

In this work, we explore potential consequences of superconducting fluctuations for the spectroscopic observations in overdoped cuprates. Different from previous studies [14, 33–35], we employ a static auxiliary field Monte Carlo approach [36–41] and use the phase mutual information to analyze short- and long-range phase correlations of the superconducting pairing fields. The mutual information [42–48] measures the nonlinear association encoded in a joint probability distribution [49–51] and has been successfully applied to various physical systems [52–57]. It provides an excellent indicator of superconducting phase correlation and allows us to construct a superconducting phase diagram as a function of temperature and pairing interaction. We identify three temperature scales over a wide intermediate range of the pairing interaction and determine four distinct phases: the superconducting, (macroscopic) phase fluctuating, local pairing, and disordered regions. Calculations of the angle-resolved spectra, scattering rate, entropy, specific heat, quasiparticle dispersion, and Fermi arc show interesting anisotropic features, beyond the mean-field theory but agreeing well with experiments. For sufficiently strong pairing interaction, we find a plaquette state of charge-4e pairing with a U-shaped density of states that competes with the uniform d-wave superconductivity [58].
Our work provides a systematic understanding of the effects of superconducting fluctuations on the spectroscopic properties of overdoped cuprates.

II. MODEL AND METHOD

We start with the following Hamiltonian,

H = -\sum_{il\sigma} t_{il} c^{\dagger}_{i\sigma} c_{l\sigma} - \mu \sum_{i\sigma} c^{\dagger}_{i\sigma} c_{i\sigma} - V \sum_{\langle ij \rangle} \big(\psi^{S}_{ij}\big)^{\dagger} \psi^{S}_{ij},    (1)

where the pairing interaction is written in an explicit form for spin-singlet superconductivity, with \psi^{S}_{ij} = \frac{1}{\sqrt{2}}\,(c_{i\downarrow} c_{j\uparrow} - c_{i\uparrow} c_{j\downarrow}) and the strength V > 0, which may be directly derived from an antiferromagnetic spin density interaction or an attractive charge density interaction between nearest-neighbor sites [59]. To decouple the pairing interaction, we apply the Hubbard-Stratonovich transformation and introduce the auxiliary field ∆ij [60]:

V \bar{\psi}^{S}_{ij} \psi^{S}_{ij} \rightarrow \bar{\Delta}_{ij} \psi^{S}_{ij} + \bar{\psi}^{S}_{ij} \Delta_{ij} + \frac{|\Delta_{ij}|^{2}}{V}.    (2)

The model is generally unsolvable. To proceed, we further adopt a static approximation and ignore the imaginary-time dependence of the auxiliary fields. This allows us to integrate out the fermionic degrees of freedom and simulate solely the pairing fields ∆ij. We obtain an effective action

S_{\mathrm{eff}}(\Delta) = -\sum_{i} \ln\big(1 + e^{-\beta \Lambda_{i}}\big) + 2\beta V \sum_{\langle ij \rangle} \bar{\Delta}_{ij} \Delta_{ij},    (3)

where β is the inverse temperature and Λi are the eigenvalues of the matrix

O = \begin{pmatrix} -\mu - T & M \\ M^{*} & \mu + T \end{pmatrix},    (4)

in which T is the N × N hopping matrix (N is the site number) and Mij = V ∆ij comes from the pairing term. For spin-singlet pairing, ∆ij is symmetric and defined on the bond between two nearest-neighbor sites ij. We thus have in total 2N independent complex variables satisfying the probability distribution

p(\Delta) = Z^{-1} e^{-S_{\mathrm{eff}}}, \qquad Z = \int \mathcal{D}\Delta\, \mathcal{D}\bar{\Delta}\, e^{-S_{\mathrm{eff}}},    (5)

where Z is the partition function serving as the normalization factor. Because O is a Hermitian matrix, all its eigenvalues Λi, and consequently S_eff, are real. Hence, the above model can be simulated by Monte Carlo with the Metropolis algorithm.
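To make the sampling step concrete, the following is a minimal numerical sketch, not the authors' code: it assembles the 2N x 2N matrix O of Eq. (4) from a bond-field configuration, evaluates S_eff of Eq. (3) from its eigenvalues, and performs single-bond Metropolis updates targeting p(∆) of Eq. (5). The proposal width `step`, the full re-diagonalization after every update, and all parameter values are illustrative assumptions rather than details taken from the paper.

import numpy as np

def effective_action(T_hop, delta, V, beta, mu):
    """S_eff of Eq. (3); delta maps each bond (i, j) to a complex field."""
    N = T_hop.shape[0]
    M = np.zeros((N, N), dtype=complex)
    for (i, j), d in delta.items():
        M[i, j] = M[j, i] = V * d                # Delta_ij is symmetric on the bond
    O = np.block([[-mu * np.eye(N) - T_hop, M],
                  [M.conj(), mu * np.eye(N) + T_hop]])
    lam = np.linalg.eigvalsh(O)                  # O is Hermitian, so Lambda_i are real
    s_fermion = -np.sum(np.logaddexp(0.0, -beta * lam))   # -sum ln(1 + e^{-beta Lambda})
    s_bond = 2.0 * beta * V * sum(abs(d) ** 2 for d in delta.values())
    return s_fermion + s_bond

def metropolis_sweep(T_hop, delta, V, beta, mu, step=0.2, rng=None):
    """One sweep of single-bond Metropolis updates (slow but simple:
    the full matrix is re-diagonalized for every proposal)."""
    if rng is None:
        rng = np.random.default_rng()
    s_old = effective_action(T_hop, delta, V, beta, mu)
    for bond in list(delta):
        saved = delta[bond]
        delta[bond] = saved + step * (rng.normal() + 1j * rng.normal())
        s_new = effective_action(T_hop, delta, V, beta, mu)
        if rng.random() < np.exp(min(0.0, s_old - s_new)):
            s_old = s_new                        # accept the proposed field
        else:
            delta[bond] = saved                  # reject and restore
    return delta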
In the following, all presented results are obtained on a 10×10 square lattice (N = 100). Results on larger lattices [61, 62] have been examined and are qualitatively consistent. For simplicity, only the nearest-neighbor (t) and next-nearest-neighbor (t′) hopping parameters are included. We take t′ = −0.45t following the common choice in the literature [63, 64] and set t as the energy unit. For real materials, t is typically of the order of 0.1 eV. The chemical potential µ is specially chosen to have the large Fermi surface of overdoped cuprates [65, 66], as plotted in the inset of Fig. 1(a).
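As a companion to the sketch above, the hopping matrix for this 10×10 periodic lattice with nearest-neighbor hopping t and next-nearest-neighbor hopping t′ = −0.45t can be built as below. The periodic boundary conditions and the helper names are assumptions of this illustration, not a prescription from the paper.

import numpy as np

def build_hopping(L=10, t=1.0, tp=-0.45):
    """N x N matrix of hopping amplitudes t_il on an L x L periodic square lattice."""
    N = L * L
    T = np.zeros((N, N))
    idx = lambda x, y: (x % L) * L + (y % L)        # site index with periodic wrapping
    for x in range(L):
        for y in range(L):
            i = idx(x, y)
            for dx, dy in [(1, 0), (0, 1)]:         # nearest neighbors, amplitude t
                j = idx(x + dx, y + dy)
                T[i, j] = T[j, i] = t
            for dx, dy in [(1, 1), (1, -1)]:        # next-nearest neighbors, amplitude t'
                j = idx(x + dx, y + dy)
                T[i, j] = T[j, i] = tp
    return T

def nearest_neighbor_bonds(L=10):
    """The 2N bonds (i, j) that carry the pairing fields Delta_ij."""
    idx = lambda x, y: (x % L) * L + (y % L)
    return [(idx(x, y), idx(x + dx, y + dy))
            for x in range(L) for y in range(L) for dx, dy in [(1, 0), (0, 1)]]

With T_hop = build_hopping() and delta = {b: 0.1 + 0j for b in nearest_neighbor_bonds()}, repeated calls to metropolis_sweep at the desired inverse temperature generate field configurations; the chemical potential would still have to be tuned separately to the intended filling.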
III. RESULTS

For comparison, we first discuss the uniform mean-field solution. The pairing fields are found to satisfy ∆^x = −∆^y, where the superscript denotes the bond direction. The gap along the Fermi surface is shown in the inset of Fig. 1(a), reflecting a typical d_{x^2−y^2}-wave structure [67]. The maximum gap size ∆(T = 0) and Tc are plotted in Fig. 1(a), and both increase with increasing pairing interaction V. The typical BCS formula for Tc is reproduced only at small V but is violated for V > 0.5, where we find a roughly linear relation Tc ∼ V with the ratio 2∆(0)/Tc ≈ 4.6–6.1, larger than the BCS prediction. Thus the mean-field solution already indicates a strong-coupling nature of the superconductivity at large V.

FIG. 1: (a) The mean-field phase diagram, where Tc and ∆(0) are the superconducting transition temperature and the maximum of the momentum-dependent gap at T = 0, respectively. The inset gives the superconducting gap ∆k along the Fermi surface for V = 1.5 at T = 0.0001. (b) Evolution of the amplitude distribution p(|∆|) for all bonds at T = 0.0008, showing one peak for moderate interaction and two peaks for strong interaction. (c) The peak position |∆|max of p(|∆|) as a function of the pairing interaction V, where two maxima are seen to occur for V ≥ 4. (d) Evolution of p(|∆|) from a two-peak to a one-peak structure with increasing temperature for V = 6.1.

A. Spatial correlations of the pairing fields

Our Monte Carlo simulations of the auxiliary pairing fields allow us to study the effect of superconducting fluctuations beyond the mean-field solution. Figure 1(b) shows the amplitude distribution of the pairing field p(|∆|) on all bonds at a very low temperature T = 0.0008.
We focus on moderate and large pairing interactions where Tc is not too small for our numerical simulations.
For V = 1.3 and 3.6, the distributions are quite normal and can be well fitted by a Gaussian form. But for V ≥ 4, the distribution develops a two-peak structure. Figure 1(c) summarizes the peak positions as a function of V for T = 0.0008. A transition occurs at V ≈ 4.0, separating the superconductivity into two regions. We will see that they correspond to a homogeneous superconducting state for moderate V and a spatially modulated state for large V, respectively. In Fig. 1(d), the two-peak distribution at large V is gradually suppressed with increasing temperature and becomes a single peak at sufficiently high temperatures, implying a close relation between the two states.

We first focus on the homogeneous state for moderate V and study its properties from the perspective of phase correlations of the pairing fields. Our tool is the joint distribution p(θ^i_0, θ^i_R), where 0 ≡ (0, 0) denotes the bond attached to any origin site, R represents the relative coordinate of the other bond, and i = x, y denotes the bond along the x- or y-direction.
Figure 2(a) plots some typical results for i = x and R = (1, 0) (short range) and (5, 5) (long range) at different temperatures. Due to rotational symmetries, the results are the same for i = y. At high temperatures, we find a uniform distribution due to strong thermal fluctuations. With lowering temperature, the two phases gradually lock, as manifested by the maximum of the distribution along the diagonal. A direct comparison shows that this feature first appears on short range with R = (1, 0) and then on long range with R = (5, 5). Hence, the phase coherence of the superconducting pairing grows gradually on the lattice to longer distances with decreasing temperature.

To quantify the correlation, we introduce their phase mutual information defined as

I^{i}_{R} = \int d\theta^{i}_{0}\, d\theta^{i}_{R}\; p(\theta^{i}_{0}, \theta^{i}_{R}) \ln\frac{p(\theta^{i}_{0}, \theta^{i}_{R})}{p(\theta^{i}_{0})\, p(\theta^{i}_{R})},    (6)

where p(x) is the marginal distribution function of the continuous random variable x and p(x, y) is the joint probability distribution of x and y. Figure 2(b) compares the phase correlations as a function of temperature on short and long distances. They all exhibit similar behavior below Tc = 0.054 and vary exponentially (dashed lines) with the temperature. But for R = (5, 5), the mutual information suffers an abrupt change and diminishes more rapidly above Tc, indicating a disparity between short- and long-range phase correlations. Thus, Tc marks a characteristic temperature scale separating the phase coherence on different spatial scales, above which long-range correlations start to be suppressed.
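To make Eq. (6) concrete, here is a minimal estimator of the phase mutual information from Monte Carlo samples of two bond phases, based on a simple two-dimensional histogram. It is an illustration written for this text, not the authors' analysis code, and the bin count and input arrays are assumptions.

import numpy as np

def phase_mutual_information(theta0, thetaR, nbins=32):
    """Histogram estimate of Eq. (6) for two arrays of phases in [-pi, pi)."""
    edges = np.linspace(-np.pi, np.pi, nbins + 1)
    hist, _, _ = np.histogram2d(theta0, thetaR, bins=[edges, edges])
    p_joint = hist / hist.sum()                    # probability mass per 2D bin
    p0 = p_joint.sum(axis=1)                       # marginal of theta_0
    pR = p_joint.sum(axis=0)                       # marginal of theta_R
    mask = p_joint > 0
    return np.sum(p_joint[mask] * np.log(p_joint[mask] / np.outer(p0, pR)[mask]))

The inputs would be, for example, np.angle(samples_0) and np.angle(samples_R) collected for the two bonds over many Monte Carlo sweeps.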
FIG. 2: (a) Comparison of the joint distribution p(θ^x_0, θ^x_R) for R = (1, 0) and R = (5, 5) at different temperatures. (b) Evolution of the short- and long-range phase mutual information calculated from (a) as a function of temperature, showing two temperature scales Tc and Tp (vertical grey lines) from the slope change. (c) Temperature dependence of the variance of the two principal components θ^±_R = (θ_0 ± θ_R)/√2 from PCA analyses of the data in (a) for short- and long-range phase correlations. The inset shows the results for R = (1, 0) over a larger temperature window. (d) Power-law decay of the phase mutual information I^x_R with distance |R| = |R_x| + |R_y|. The dotted lines are the fitting curves I^x_R ∝ |R|^{−α}. (e) Temperature dependence of the extracted exponent α from (d). The vertical grey lines mark the transition points identified in (b).

At a higher temperature, Tp = 0.08 for the chosen parameters, a weaker slope change is found for both the short- and long-range correlations. To see what happens at this temperature, we apply principal component analysis (PCA) to the Monte Carlo samples collected in Fig. 2(a). As expected, this reveals two principal directions θ^±_R = (θ_0 ± θ_R)/√2 on the (θ_0, θ_R) plane at all temperatures, with opposite temperature dependence of their variances. The superscript i is dropped because the data on both bond directions i = x, y are considered together.
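A minimal sketch of this PCA step (again an illustration under stated assumptions, not the authors' implementation): it computes the variances along the rotated directions θ^± = (θ_0 ± θ_R)/√2 and, as a cross-check, the eigenvalues of the 2x2 sample covariance matrix. The phase samples are assumed to have been mapped to a common branch so that these linear statistics are meaningful.

import numpy as np

def principal_variances(theta0, thetaR):
    """Variances along theta_+ and theta_-, plus the covariance eigenvalues."""
    theta_plus = (theta0 + thetaR) / np.sqrt(2.0)
    theta_minus = (theta0 - thetaR) / np.sqrt(2.0)
    var_plus, var_minus = np.var(theta_plus), np.var(theta_minus)
    # when the two marginals have (nearly) equal variance, the PCA eigenvectors
    # reduce to the +-45 degree directions used above
    cov = np.cov(np.vstack([theta0, thetaR]))
    eigvals = np.linalg.eigvalsh(cov)
    return var_plus, var_minus, eigvals

A small var(theta_minus) signals phase locking between the two bonds, while var(theta_plus) comparable to var(theta_minus) signals an essentially uniform, uncorrelated distribution.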
As shown in Fig. 2(c), the decrease of var(θ^−_R) signifies an increasing degree of phase locking over the distance R with lowering temperature.
Interestingly, var(θ^±_R) become almost equal above Tp along both directions for R = (5, 5), implying a uniform distribution on the (θ_0, θ_R) plane and hence the almost complete loss of long-range phase correlation.
On the other hand, the two variances still differ for R = (1, 0), indicating the existence of short-range correlation. The latter is suppressed only at much higher temperatures, above Tl = 0.25, as shown in the inset of Fig. 2(c). Thus, Tl marks a temperature scale above which no phase correlations are present (a disordered state). Below Tl, the pairing fields start to develop between neighboring bonds, indicating the onset of local pairing with only short-range correlations. A long-range or macroscopic phase correlation only emerges below Tp (a phase fluctuating state) and eventually grows into a global coherent state (the superconductivity) at Tc, below which we can no longer distinguish short- and long-range correlations.

The above separation of different regions may be seen from a different angle by plotting the mutual information as a function of the "distance" |R| ≡ |R_x| + |R_y|. The results for I^x_R are shown in Fig. 2(d). We see excellent power-law decay, I^x_R ∝ |R|^{−α}, at all temperatures. The extracted decay rate α increases with temperature but behaves differently in the three regions divided by Tc and Tp. α varies most rapidly for Tc < T < Tp, which may be understood from the suppression of long-range phase correlation in this temperature region. For T < Tc, α approaches almost zero, indicating the presence of long-range coherence. The fact that α > 2 for T > Tp implies a rapid decay due to purely short-range correlation.
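The exponent α can be extracted by a least-squares fit of ln I against ln |R|. The short sketch below is illustrative, with hypothetical input numbers.

import numpy as np

def powerlaw_exponent(distances, mutual_info):
    """Fit I_R ~ |R|**(-alpha) by linear regression in log-log space."""
    logR = np.log(np.asarray(distances, dtype=float))
    logI = np.log(np.asarray(mutual_info, dtype=float))
    slope, intercept = np.polyfit(logR, logI, 1)
    return -slope, np.exp(intercept)               # (alpha, prefactor)

# made-up example: values falling off roughly as |R|**-2 give alpha close to 2,
# the regime of rapidly decaying, short-range-only correlations
alpha, _ = powerlaw_exponent([1, 2, 3, 4], [0.30, 0.075, 0.033, 0.019])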
B. Effects on spectroscopic properties

Having established how the superconductivity develops from its phase correlations, we now examine how these may be related to experimental observations in real materials. First of all, the d-wave nature of the superconducting pairing can be seen from the joint distribution of θ^x_0 and θ^y_0 on the two bonds connected to the same site. As shown in the inset of Fig. 3(a), we find a rough correlation θ^y_0 = θ^x_0 ± π, namely a sign change of the pairing fields along two perpendicular bond directions. Their mutual information I^{xy}_0 is presented in Fig. 3(a) as a function of temperature. Its slope changes at Tc and Tp are similar to those of I^i_R between neighboring bonds.

The separation of short- and long-range phase correlations has important consequences for the spectral properties, which may be studied by assuming a twist boundary condition to overcome the finite-size effect [68]. Figure 3(b) plots the total density of states at the Fermi energy, N(0), normalized by its high-temperature value. It is almost constant above Tp, but then decreases gradually with lowering temperature, reflecting the spectral-weight depression induced by the gap opening at zero energy.
FIG. 3: (a) Temperature dependence of the phase mutual information I^{xy}_0 between the x- and y-bonds attached to the same site. The inset shows their joint phase distribution at T = 0.034, indicating d-wave correlations between the two bonds. (b) The normalized total density of states N(0) at the Fermi energy and its temperature derivative dN(0)/dT as functions of temperature, showing features at Tc and Tp (grey vertical lines) determined from the phase mutual information. (c) Temperature evolution of the total density of states N(ω), showing the gradual gap opening near the Fermi energy. The inset illustrates the azimuthal angle φ and the positions of the node and antinode. (d) Temperature dependence of the angle-resolved spectral function A(φ, 0) and its derivative dA(φ, 0)/dT at the noninteracting Fermi wave vector and the Fermi energy for V = 1.5 and φ = 0.2, 0.3.

Interestingly, the temperature derivative dN(0)/dT exhibits a maximum at around Tc, consistent with the slope change of the long-range phase mutual information I^{x(y)}_{(5,5)}. Correspondingly, as shown in Fig. 3(c), a pseudogap develops gradually in N(ω) with lowering temperature over the intermediate range Tc < T < Tp. These observations establish a close relation between the long-range phase correlation and the spectral gap of the superconductivity.

Similar temperature evolution is also seen in the angle-resolved spectral function A(φ, 0) and its temperature derivative dA(φ, 0)/dT along the azimuthal angle φ at the noninteracting Fermi wave vector and zero energy. For larger φ, away from the antinode, the spectral function grows to a maximum at lower temperature and has a higher residual value in the zero-temperature limit. Meanwhile, its temperature derivative becomes more enhanced below Tc but suppressed above Tc.
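Numerically, quantities such as the density of states in Fig. 3 follow from the same eigen-decomposition used in the sampling. The sketch below gives one standard BdG-style estimate of the electron density of states for a single pairing-field configuration, with a Lorentzian broadening eta; the particle-weight construction, the broadening, and the twist-average comment are assumptions of this illustration rather than the paper's stated procedure.

import numpy as np

def electron_dos(T_hop, M, mu, omegas, eta=0.05):
    """Broadened electron DOS from one configuration of the pairing matrix M.

    omegas is a 1D numpy array of energies measured from the Fermi level.
    """
    N = T_hop.shape[0]
    O = np.block([[-mu * np.eye(N) - T_hop, M],
                  [M.conj(), mu * np.eye(N) + T_hop]])
    lam, vec = np.linalg.eigh(O)
    weight = np.sum(np.abs(vec[:N, :]) ** 2, axis=0)     # particle content of each mode
    lorentz = eta / np.pi / ((omegas[:, None] - lam[None, :]) ** 2 + eta ** 2)
    return lorentz @ weight / N

# A twist boundary condition would enter through phase factors on the hopping
# matrix elements that cross the boundary, with the resulting curves averaged
# over a grid of twist angles and over the Monte Carlo samples of M.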
Such an intrinsic anisotropy has been observed in the latest ARPES experiment [31]. To clarify the origin of the anisotropy, we compare in Fig. 4(a) the temperature dependence of the spectral functions A(φ, ω) at different azimuthal angles φ.
FIG. 4: (a) Temperature evolution of the angle-resolved spectral function A(φ, ω) at different positions on the Fermi surface. (b) Comparison of the extracted gaps ∆(φ, T) from (a) as functions of the temperature T. (c) Angular dependence of the spectral gap ∆(φ, T) and the scattering rate Γ(φ, T) on the Fermi surface. ∆ and Γ are defined as the energy and the half-maximum half-width of the upper peak of the spectral function A(φ, ω). (d) and (e) give the calculated thermal entropy S(φ, T) and specific heat coefficient γ(φ, T) as functions of temperature at different positions (φ) on the Fermi surface.

Obviously, the spectra exhibit very different behaviors near the nodal and antinodal directions. Figure 4(b) plots the extracted spectral gap ∆(φ, T) as a function of temperature. With increasing temperature, the gap closes first near the nodal direction. Thus, as shown in Fig. 4(c), it only satisfies the ideal d-wave form ∆(φ, T) ∝ cos(2φ) (green dashed line) at sufficiently low temperatures. This is beyond the mean-field approximation and reflects the effect of phase fluctuations. Consequently, the scattering rate Γ(φ), estimated from the half-maximum half-width of the upper peak of A(φ, ω), also exhibits smaller values near the node.

The anisotropy of the spectral functions has an effect on the angle-resolved thermal entropy S(φ, T) and the specific heat coefficient γ(φ, T) = dS(φ, T)/dT, obtained using

S(φ, T) = −∫ dω A(φ, ω) [f ln f + (1 − f) ln(1 − f)],

where f is the Fermi distribution function.
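This entropy integral and its temperature derivative are simple to evaluate numerically; the sketch below does so by trapezoidal integration and finite differences, with the energy grid, the spectral-function array, and the unit convention (k_B = 1, ω measured from the Fermi level) being assumptions of the illustration.

import numpy as np
from scipy.special import expit

def angle_resolved_entropy(omegas, A_phi, temps):
    """S(phi, T) = -int dw A(phi, w) [f ln f + (1 - f) ln(1 - f)];
    A_phi[k] is the spectral function on the grid omegas at temperature temps[k]."""
    S = np.zeros(len(temps))
    for k, T in enumerate(temps):
        f = expit(-omegas / T)                          # Fermi function, k_B = 1
        f = np.clip(f, 1e-12, 1.0 - 1e-12)              # avoid log(0) in the tails
        integrand = A_phi[k] * (f * np.log(f) + (1.0 - f) * np.log(1.0 - f))
        S[k] = -0.5 * np.sum((integrand[1:] + integrand[:-1]) * np.diff(omegas))
    return S

def specific_heat_coefficient(S, temps):
    """gamma(phi, T) = dS/dT via centered finite differences."""
    return np.gradient(S, temps)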
FIG. 5: (a) Intensity plot of the spectral function A(ky, ω) for kx = −3.047 at T = 0.022, 0.070, 0.098. (b) Extracted dispersions from the spectral functions at different temperatures, showing back bending even above Tc. The vertical grey lines mark the Fermi wave vector ky = ±0.4712. (c) The dispersions near the antinode and the node for T = 0.066 and 0.034. For comparison, all curves are shifted such that the Fermi wave vectors are located at ky = 0.5 (grey line). For clarity, only the lower (negative energy) parts of the superconducting dispersions are shown. (d) Length of the Fermi arc l(φ) as a function of temperature. The green arrows mark Tc and Tp, and the dashed line is a guide to the eye. The inset shows the Lorentzian fit of the angle-dependent spectral function A(φ, 0) on the Fermi surface. (e) Intensity plot of the spectral function A(k, 0) at zero energy in the first Brillouin zone for different temperatures, showing the gradual development of the Fermi arc.
As shown in Figs. 4(d) and 4(e), the resulting S(φ, T) and γ(φ, T) exhibit a similar temperature and angle dependence as A(φ, 0) and dA(φ, 0)/dT in Fig. 3(d), which agrees well with the entropy reduction and specific heat anisotropy reported in the latest ARPES experiment [31]. To further compare with experiment [31], Fig. 5(a) plots the energy-momentum dependent spectral function
A(ky, E) at fixed kx = −3.047, which allows us to extract the energy of the maxima for each ky. The resulting dispersions are shown in Fig. 5(b) for T = 0.098, 0.07, and 0.022. We see that the dispersion exhibits back bending even for T = 0.07 > Tc but almost recovers the normal-state one for T = 0.098 > Tp. The vector kG where the bending occurs is the same as the Fermi vector kF = ±0.4712 (the grey vertical line), which differs from the prediction based on density wave or magnetic order pictures. The extracted dispersion also manifests anisotropy due to phase fluctuations. In Fig. 5(c), the dispersion near kF (the grey vertical line) shows an angle-dependent gap at T = 0.034, but a clear node-antinode dichotomy at T = 0.066,
with the near-node dispersion crossing the Fermi energy and the near-antinode dispersion exhibiting a gap and back bending, as reported previously in underdoped experiments [15]. The effect of the phase correlation is also reflected in the topology of the Fermi surface. As shown in the inset of Fig. 5(d), the angle-dependent spectral function A(φ, 0) is gradually suppressed away from the nodal point with lowering temperature. This leads to a variation of the Fermi arc [69–71], whose length l(φ), estimated from the 0.6-maximum width of the spectral peak, is plotted in Fig. 5(d) as a function of temperature. We see that l(φ) almost saturates below Tc, increases linearly with temperature in the intermediate region, and reaches the full length (Fermi surface) at high temperatures. This confirms its connection with the phase correlation identified using the phase mutual information. Such a temperature variation of the Fermi arc length has been observed in a scanning tunneling spectroscopy (STS) experiment [20], implying that the zero arc length reported in the ARPES experiments [72] might originate from the peaks of the artificially symmetrized A(φ, ω). To be specific, Fig. 5(e) maps out the zero-energy spectral function A(k, 0) in the first Brillouin zone, and we see a clear evolution from the Fermi arc at T = 0.058 to the Fermi surface at T = 0.09. This variation indicates that the Bogoliubov quasiparticle appears at different temperatures in different regions of the Fermi surface. The arc is more broadened close to the node, consistent with a previous experiment [73].
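As a concrete illustration of the arc-length estimate just described, the following sketch takes A(φ, 0) sampled on a grid of Fermi-surface angles and measures the fraction of angles where the spectral weight stays above 0.6 of its maximum; the Gaussian toy profile is only a placeholder for the simulated spectral weight.

```python
import numpy as np

def arc_length_fraction(phi, A0, frac=0.6):
    """Fraction of sampled Fermi-surface angles where A(phi, 0) >= frac * max(A)."""
    return float(np.mean(A0 >= frac * A0.max()))

# Toy angle-resolved weight: a peak centred on the nodal direction phi = pi/4 whose
# width plays the role of temperature (broader peak ~ higher temperature).
phi = np.linspace(0.0, np.pi / 2, 181)
for width in (0.1, 0.3, 0.6):
    A0 = np.exp(-((phi - np.pi / 4) ** 2) / (2 * width ** 2))
    print(f"width={width:.1f}  arc fraction={arc_length_fraction(phi, A0):.2f}")
```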
C. The superconducting phase diagram and a strong-coupling plaquette state

Having identified the different regions of phase correlations at a fixed V, we now turn to their variation with the pairing interaction. As shown in Figs. 6(a) and 6(b), the range of exponential temperature dependence also varies. As V increases, the curves first move to higher temperatures, but then shift somewhat backwards. Such a nonmonotonic variation is better seen in Figs. 6(c) and 6(d), where the phase mutual information Ix(1,0) and Ix(5,5) are replotted as functions of V for different temperatures.

FIG. 6: Comparison of the short- and long-range phase mutual information with R = (1, 0) and (5, 5), (a)(b) as functions of temperature for different pairing interactions, and (c)(d) as functions of the pairing interaction for different temperatures. (e) The superconducting phase diagram with Tc and Tp determined from the phase mutual information and T2 from the onset of the two-peak amplitude distribution. (f) Comparison of the condensation energy Eg for the uniform mean-field solution and the static auxiliary field Monte Carlo (SAF-MC) solution. Also shown is the variance of the amplitude distribution var(|∆|) from the Monte Carlo simulations at T = 0.001.
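The definition of the phase mutual information is given earlier in the paper and is not reproduced in this excerpt. As a hedged illustration of how such a quantity can be estimated from Monte Carlo samples, the sketch below uses a simple two-dimensional histogram estimator for the mutual information between two bond phases; this is a standard, if biased, estimator and not necessarily the one used by the authors.

```python
import numpy as np

def mutual_information(theta1, theta2, bins=24):
    """Histogram estimate of I(theta1; theta2) in nats for two angles in (-pi, pi]."""
    edges = np.linspace(-np.pi, np.pi, bins + 1)
    pxy, _, _ = np.histogram2d(theta1, theta2, bins=[edges, edges])
    pxy /= pxy.sum()
    px = pxy.sum(axis=1, keepdims=True)
    py = pxy.sum(axis=0, keepdims=True)
    nz = pxy > 0  # skip empty bins to avoid log(0)
    return float(np.sum(pxy[nz] * np.log(pxy[nz] / (px @ py)[nz])))

# Correlated toy samples: theta2 follows theta1 up to Gaussian phase noise,
# standing in for the phases of two pairing fields separated by R.
rng = np.random.default_rng(0)
theta1 = rng.uniform(-np.pi, np.pi, 200_000)
for noise in (0.3, 1.0, 3.0):  # weaker correlation -> smaller mutual information
    theta2 = np.angle(np.exp(1j * (theta1 + noise * rng.normal(size=theta1.size))))
    print(noise, mutual_information(theta1, theta2))
```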
Both exhibit nonmonotonic behavior with increasing V at low temperatures, indicating that the phase correlations are suppressed when the pairing interaction becomes too large. As we will see, this is closely associated with the two-peak structure of the amplitude distribution in Fig. 1(d). Taken together, a superconducting phase diagram can be constructed, as shown in Fig. 6(e), where both Tc and Tp behave nonmonotonically with V. Also shown is a third temperature scale T2, below which the amplitude distribution has two peaks. T2 only appears for sufficiently large V, indicating a strong-coupling limit whose nature will be clarified later.
FIG. 7: (a) Typical configurations of the pairing fields at T = 0.001 for V = 0.5, 3.7, and 6.1. The square size represents their amplitude and the colors mark the sign of their phase θ ∈ (−π, π]. (b) Evolution of the total density of states N(ω) with the pairing interaction at T = 0.025, showing a crossover from V-shape to U-shape. (c) Temperature dependence of N(ω) at V = 4.9. The inset shows the joint distribution of θ^x_0 and θ^y_0 at T = 0.02, indicating a d-wave form of the pairing fields on the bonds in the plaquette state.
Interestingly, we see that Tc takes its maximum near the critical V of the two-peak distribution and is suppressed as T2 increases. This suggests that the superconductivity is competing with this strong-coupling state. To clarify this issue, we compare in Fig. 7(a) typical Monte Carlo configurations of the pairing fields for weak, intermediate, and strong V at T = 0.001. The size of the square represents the amplitude |∆| and the color denotes the sign of the phase θ ∈ (−π, π]. For weak V = 0.5, the distribution on the lattice is random, reflecting that the system is not yet in a phase-coherent region (T > Tc). For intermediate V = 3.7, we find a uniform distribution of the amplitude, while the phase changes sign periodically and exhibits a d-wave pattern. It is straightforward to identify this state as the uniform d-wave superconductivity. For strong V = 6.1, the amplitude distribution is no longer uniform but exhibits cluster patterns. We call it a charge-4e d-wave plaquette state, since it is formed of local plaquettes [74] with four bonds of large |∆| in a unit cell surrounded by weak bonds in a 2 × 2 cell. The plaquette has the same sign structure as the d-wave superconductivity.
The whole state can be regarded as weakly connected charge-4e plaquettes. Clearly, this is not a phase separation, and the two-peak feature of the amplitude distribution is a reflection of the special plaquette structure. This state breaks the translational invariance of the pairing fields but keeps the uniform distribution of the electron densities. It persists to a very large V = 7.5, beyond which the bonds become less correlated as t/V → 0. To show that the plaquette state is stable over the uniform superconductivity, we calculate their condensation energies using

Eg = Σl |ξl| + (2/V) Σ⟨ij⟩ ∆̄ij ∆ij − Σl Λl,   (7)

where l = 1, 2, · · · , N and ξl is the eigenvalue of the non-interacting Hamiltonian. Figure 6(f) compares the condensation energies of the mean-field uniform solution and the Monte Carlo solution. For small V, we see that they are almost equal. But beyond the critical V of the plaquette state, the mean-field uniform solution has a higher energy than the Monte Carlo (plaquette) solution. In this region, the variance var(|∆|) of the amplitude distribution grows rapidly with increasing V, reflecting an increasing difference between the strong and weak bonds. The transition to the plaquette state may be detected from the V-shape-to-U-shape change of the density of states, as shown in Fig. 7(b). Figure 7(c) plots N(ω) at V = 4.9 for different temperatures. The plaquette state melts as N(ω) changes from U-shape to V-shape with increasing temperature.
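A direct numerical transcription of Eq. (7) is straightforward once the simulation outputs are in hand. The sketch below assumes that the eigenvalues ξl of the non-interacting Hamiltonian, the eigenvalues Λl (defined earlier in the paper and not reproduced here), and the converged bond pairing fields ∆ij are already available as arrays, and it reads ∆̄ij as the complex conjugate of ∆ij; the random inputs are placeholders with the right shapes only.

```python
import numpy as np

def condensation_energy(xi, Lam, delta_bonds, V):
    """Evaluate Eq. (7): Eg = sum_l |xi_l| + (2/V) sum_<ij> |Delta_ij|^2 - sum_l Lambda_l.

    xi          : eigenvalues of the non-interacting Hamiltonian
    Lam         : eigenvalues Lambda_l (assumed precomputed as defined in the paper)
    delta_bonds : complex pairing fields Delta_ij on the nearest-neighbour bonds
    V           : pairing interaction strength
    """
    bond_term = 2.0 * np.sum(np.abs(delta_bonds) ** 2) / V
    return np.sum(np.abs(xi)) + bond_term - np.sum(Lam)

# Placeholder inputs of the right shapes (N sites, 2N bonds on a square lattice).
rng = np.random.default_rng(1)
N, V = 100, 3.7
xi = rng.uniform(-4.0, 4.0, N)
Lam = np.abs(rng.normal(0.0, 4.0, N))
delta_bonds = 0.3 * np.exp(1j * rng.uniform(-np.pi, np.pi, 2 * N))
print(condensation_energy(xi, Lam, delta_bonds, V))
```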
Note that a U-shaped curve is typically ascribed to s-wave superconductivity. However, the plaquette state still exhibits d-wave bonds with θ^x_0 = θ^y_0 ± π, as shown in the inset of Fig. 7(c). A similar variation has been observed in an STS measurement on twisted trilayer graphene [58], where it was argued to originate from two-particle bound states. In our simulations, the four-particle plaquette state is more favored with the nearest-neighbour pairing interaction. It has been suggested that strong attractive interaction may always lead to phase separation [75–79]. It could be that the pairing interaction for the plaquette state is not yet strong enough. For sufficiently large V, we find randomly distributed dimers and plaquettes, possibly because the pairing correlations are suppressed as t/V becomes too small. The plaquette state may in some sense be related to the pair density wave (PDW) [80–82]. But our derived configuration is special. It does not induce any charge density wave and may only be produced by a complicated combination of uniform superconductivity and bidirectional PDW states with wave vectors (0, π) and (π, 0). It may thus be better viewed as a different strong-coupling limit of the d-wave superconductivity.

IV. DISCUSSION AND CONCLUSIONS

We have applied the static auxiliary field Monte Carlo method to study phase correlations of the superconducting pairing fields. We can reproduce the weak-coupling
BCS solution of the mean-field theory and identify a region above Tc by the separation between short- and long-range phase correlations for moderate and strong pairing interactions. This phase-fluctuating region above the uniform d-wave superconductivity has a number of spectroscopic features, including the anisotropy of the angle-resolved gap opening, scattering rate, and specific heat coefficient, as well as the gradual development of the Fermi arc.
The angular or momentum dependence of the gap-opening temperature may be a general feature of phase fluctuations for all kinds of orders. For sufficiently strong pairing interaction, our simulation reveals a competing charge-4e plaquette state with d-wave-like bonds and a U-shaped density of states. The superconducting transition temperature seems maximal near the critical pairing interaction of the plaquette state, raising an interesting question concerning their relationship.

It should be mentioned that we begin the calculations with an attractive spin-singlet pairing interaction. This form of Hamiltonian can be derived naturally from a nearest-neighbor antiferromagnetic spin fluctuation interaction, which has been argued to provide a holistic picture for all cuprates [83]. But the superconductivity may be suppressed by magnetic order if the electron density is close to half filling. Here we only consider the overdoped region, where the magnetic long-range order is not important and we only have to deal with the auxiliary pairing fields. Since onsite pairing is not supported due to the strong onsite Coulomb repulsion, it is reasonable to consider the pairing fields only between nearest-neighbor sites. The agreement of our results with spectroscopic experiments confirms this spin fluctuation model in overdoped cuprates. On the other hand, an attractive charge density interaction [84, 85] may also yield a similar pairing interaction. However, phenomenological interactions of charge fluctuations typically give a positive nearest-neighbour charge density interaction [59, 86]. Hence, more exotic forms are needed in order to explain cuprates using charge interaction, which may be in conflict with the X-ray experiment [87].
It may be useful to compare our results for the uniform superconductivity with the XY model, which is believed to describe the physics of two-dimensional superconductivity [10, 12, 88]. For this purpose, we first define the superconducting order parameter on the lattice sites, namely ∆i = (∆i,i+x + ∆i,i−x − ∆i,i+y − ∆i,i−y)/4, where ∆i,i±x and ∆i,i±y are the pairing fields on the four bonds connected to site i. The number of vortices can then be calculated using ∆i following the standard definition [13]; it is found to be nearly zero below Tc, to grow rapidly between Tc and Tp and slowly above Tp, and to eventually saturate above Tl. The rapid increase above Tc corresponds well with that predicted for the Berezinskii-Kosterlitz-Thouless (BKT) transition due to the unbinding of vortices and antivortices [89–91], indicating that our Tc is exactly the BKT transition temperature. The power-law decay of the phase mutual information indicates a quasi-long-range order that does not break U(1) symmetry, conforming to the well-known Mermin-Wagner theorem [92, 93]. Our identification of three temperature scales and four distinct regions may offer some insight into the triple transition seen in the resistance experiment [27], where normal metal, pseudogap (incoherent metal), phase fluctuation, and superconductivity are separated. A similar scenario may also be related to the transition between superconductivity and normal metal, where disorder or a magnetic field may broaden the transition and lead to one or two intermediate regions [94, 95]. Superconducting phase fluctuations also play an important role in other superconductors, such as Fe-based superconductors [96–99] and disordered conventional superconductors [100–106]. Our method may also provide useful insight into the interplay between phase fluctuations and other important effects such as disorder, multi-band effects, and time-reversal symmetry breaking in these systems.
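The two steps just described are easy to prototype. The sketch below builds the site order parameter ∆i from the four bond fields and then counts vortices and antivortices by accumulating wrapped phase differences around each elementary plaquette, which is the standard XY-model winding-number definition (assumed here, since reference [13] is not reproduced in this excerpt); the random bond configuration is only a placeholder.

```python
import numpy as np

def site_order_parameter(dx, dy):
    """Delta_i = (Delta_{i,i+x} + Delta_{i,i-x} - Delta_{i,i+y} - Delta_{i,i-y}) / 4.

    dx[i] is the bond field on the +x bond leaving site i, dy[i] on the +y bond,
    both stored as L x L complex arrays with periodic boundary conditions.
    """
    return 0.25 * (dx + np.roll(dx, 1, axis=0) - dy - np.roll(dy, 1, axis=1))

def count_vortices(delta):
    """Count vortices/antivortices of the phase of Delta_i on a periodic lattice."""
    theta = np.angle(delta)

    def wrap(a):  # wrap a phase difference into (-pi, pi]
        return np.angle(np.exp(1j * a))

    # Circulation around each elementary plaquette (i, i+x, i+x+y, i+y).
    circ = (wrap(np.roll(theta, -1, axis=0) - theta)
            + wrap(np.roll(theta, -1, axis=(0, 1)) - np.roll(theta, -1, axis=0))
            + wrap(np.roll(theta, -1, axis=1) - np.roll(theta, -1, axis=(0, 1)))
            + wrap(theta - np.roll(theta, -1, axis=1)))
    winding = np.rint(circ / (2 * np.pi)).astype(int)
    return int(np.sum(winding == 1)), int(np.sum(winding == -1))

# Placeholder bond fields on a 10 x 10 periodic lattice.
rng = np.random.default_rng(2)
L = 10
dx = np.exp(1j * rng.uniform(-np.pi, np.pi, (L, L)))
dy = np.exp(1j * rng.uniform(-np.pi, np.pi, (L, L)))
print(count_vortices(site_order_parameter(dx, dy)))
```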
Our identification of three temperature scales and four distinct regions may offer some insight into the triple transition seen in the resistance experiment [27], in which normal-metal, pseudogap (incoherent metal), phase-fluctuation, and superconducting regions are separated. A similar scenario may also be related to the transition between superconductivity and normal metal, where disorder or magnetic field may broaden the transition and lead to one or two intermediate regions [94, 95].

Superconducting phase fluctuations also play an important role in other superconductors, such as Fe-based superconductors [96–99] and disordered conventional superconductors [100–106]. Our method may also provide useful insight into the interplay between phase fluctuations and other important effects such as disorder, multiband structure, and time-reversal symmetry breaking in these systems.

This work was supported by the National Natural Science Foundation of China (NSFC Grants No. 11974397, No. 12174429, and No. 12204075), the National Key Research and Development Program of China (Grant No. 2022YFA1402203), and the Strategic Priority Research Program of the Chinese Academy of Sciences (Grant No. XDB33010100).

∗ yifeng@iphy.ac.cn

[1] V. J. Emery and S. A. Kivelson, Importance of Phase Fluctuations in Superconductors with Small Superfluid Density, Nature 374, 434 (1995).
[2] M. Franz and A. Millis, Phase Fluctuations and Spectral Properties of Underdoped Cuprates, Phys. Rev. B 58, 14572 (1998).
[3] H. J. Kwon, A. T. Dorsey, and P. J. Hirschfeld, Observability of Quantum Phase Fluctuations in Cuprate Superconductors, Phys. Rev. Lett. 86, 3875 (2001).
[4] K. V. Samokhin and B. Mitrović, Nodal Quasiparticles and Classical Phase Fluctuations in d-Wave Superconductors, Phys. Rev. Lett. 92, 057002 (2004).
[5] M. R. Norman, A. Kanigel, M. Randeria, U. Chatterjee, and J. C. Campuzano, Modeling the Fermi Arc in Underdoped Cuprates, Phys. Rev. B 76, 174501 (2007).
[6] E. Berg and E. Altman, Evolution of the Fermi Surface of d-Wave Superconductors in the Presence of Thermal Phase Fluctuations, Phys. Rev. Lett. 99, 247001 (2007).
[7] Z. Tešanović, d-Wave Duality and Its Reflections in High-Temperature Superconductors, Nat. Phys. 4, 408 (2008).
[8] S. Banerjee, T. V. Ramakrishnan, and C. Dasgupta, Effect of Pairing Fluctuations on Low-Energy Electronic Spectra in Cuprate Superconductors, Phys. Rev. B 84, 144525 (2011).
[9] A. Allais, D. Chowdhury, and S. Sachdev, Connecting High-Field Quantum Oscillations to Zero-Field Electron Spectral Functions in the Underdoped Cuprates, Nat. Commun. 5, 5771 (2014).
[10] T. Eckl, D. J. Scalapino, E. Arrigoni, and W. Hanke, Pair Phase Fluctuations and the Pseudogap, Phys. Rev. B 66, 140510(R) (2002).
[11] M. Mayr, G. Alvarez, C. Şen, and E. Dagotto, Phase Fluctuations in Strongly Coupled d-Wave Superconductors, Phys. Rev. Lett. 94, 217001 (2005).
[12] Q. Han, T. Li, and Z. D. Wang, Pseudogap and Fermi-Arc Evolution in the Phase-Fluctuation Scenario, Phys. Rev. B 82, 052503 (2010).
[13] Y. W. Zhong, T. Li, and Q. Han, Monte Carlo Study of Thermal Fluctuations and Fermi-Arc Formation in d-Wave Superconductors, Phys. Rev. B 84, 024522 (2011).
[14] D. K. Singh, S. Kadge, Y. Bang, and P. Majumdar, Fermi Arcs and Pseudogap Phase in a Minimal Microscopic Model of d-Wave Superconductivity, Phys. Rev. B 105, 054501 (2022).
[15] A. Kanigel, U. Chatterjee, M. Randeria, M. R. Norman, G. Koren, K. Kadowaki, and J. C. Campuzano, Evidence for Pairing above the Transition Temperature of Cuprate Superconductors from the Electronic Dispersion in the Pseudogap Phase, Phys. Rev. Lett. 101, 137002 (2008).
[16] H. Ding, T. Yokoya, J. C. Campuzano, T. Takahashi, M. Randeria, M. R. Norman, T. Mochiku, K. Kadowaki, and J. Giapintzakis, Spectroscopic Evidence for a Pseudogap in the Normal State of Underdoped High-Tc Superconductors, Nature 382, 51 (1996).
[17] L. Li, Y. Wang, S. Komiya, S. Ono, Y. Ando, G. D. Gu, and N. P. Ong, Diamagnetism and Cooper Pairing above Tc in Cuprates, Phys. Rev. B 81, 054510 (2010).
[18] K. K. Gomes, A. N. Pasupathy, A. Pushp, S. Ono, Y. Ando, and A. Yazdani, Visualizing Pair Formation on the Atomic Scale in the High-Tc Superconductor Bi2Sr2CaCu2O8+δ, Nature 447, 569 (2007).
[19] H. B. Yang, J. D. Rameau, P. D. Johnson, T. Valla, A. Tsvelik, and G. D. Gu, Emergence of Preformed Cooper Pairs from the Doped Mott Insulating State in Bi2Sr2CaCu2O8+δ, Nature 456, 77 (2008).
[20] J. Lee, K. Fujita, A. R. Schmidt, C. K. Kim, H. Eisaki, S. Uchida, and J. C. Davis, Spectroscopic Fingerprint of Phase-Incoherent Superconductivity in the Underdoped Bi2Sr2CaCu2O8+δ, Science 325, 1099 (2009).
[21] L. S. Bilbro, R. V. Aguilar, G. Logvenov, O. Pelleg, I. Božović, and N. P. Armitage, Temporal Correlations of Superconductivity above the Transition Temperature in La2−xSrxCuO4 Probed by Terahertz Spectroscopy, Nat. Phys. 7, 298 (2011).
[22] P. Zhou, L. Chen, Y. Liu, I. Sochnikov, A. T. Bollinger, M. G. Han, Y. Zhu, X. He, I. Božović, and D. Natelson, Electron Pairing in the Pseudogap State Revealed by Shot Noise in Copper Oxide Junctions, Nature 572, 493 (2019).
[23] T. Kondo, Y. Hamaya, A. D. Palczewski, T. Takeuchi, J. S. Wen, Z. J. Xu, G. Gu, J. Schmalian, and A. Kaminski, Disentangling Cooper-Pair Formation above the Transition Temperature from the Pseudogap State in the Cuprates, Nat. Phys. 7, 21 (2011).
[24] R. H. He, M. Hashimoto, H. Karapetyan, J. D. Koralek, J. P. Hinton, J. P. Testaud, V. Nathan, Y. Yoshida, H. Yao, K. Tanaka, W. Meevasana, R. G. Moore, D. H. Lu, S. K. Mo, M. Ishikado, H. Eisaki, Z. Hussain, T. P. Devereaux, S. A. Kivelson, J. Orenstein, A. Kapitulnik, and Z. X. Shen, From a Single-Band Metal to a High-Temperature Superconductor via Two Thermal Phase Transitions, Science 331, 1579 (2011).
[25] A. Kaminski, S. Rosenkranz, H. M. Fretwell, J. C. Campuzano, Z. Li, H. Raffy, W. G. Cullen, H. You, C. G. Olson, C. M. Varma, and H. Höchst, Spontaneous Breaking of Time-Reversal Symmetry in the Pseudogap State of a High-Tc Superconductor, Nature 416, 610 (2002).
[26] N. K. Gupta, C. McMahon, R. Sutarto, T. Shi, R. Gong, H. I. Wei, K. M. Shen, F. He, Q. Ma, M. Dragomir, B. D. Gaulin, and D. G. Hawthorn, Vanishing Nematic Order beyond the Pseudogap Phase in Overdoped Cuprate Superconductors, Proc. Natl. Acad. Sci. U. S. A. 118, e2106881118 (2021).
[27] P. M. C. Rourke, I. Mouzopoulou, X. Xu, C. Panagopoulos, Y. Wang, B. Vignolle, C. Proust, E. V. Kurganova, U. Zeitler, Y. Tanabe, T. Adachi, Y. Koike, and N. E. Hussey, Phase-Fluctuating Superconductivity in Overdoped La2−xSrxCuO4, Nat. Phys. 7, 455 (2011).
[28] I. Bozovic, X. He, J. Wu, and A. T. Bollinger, Dependence of the Critical Temperature in Overdoped Copper Oxides on Superfluid Density, Nature 536, 309 (2016).
[29] F. Mahmood, X. He, I. Božović, and N. P. Armitage, Locating the Missing Superconducting Electrons in the Overdoped Cuprates La2−xSrxCuO4, Phys. Rev. Lett. 122, 027003 (2019).
[30] Y. He, S. Di Chen, Z. X. Li, D. Zhao, D. Song, Y. Yoshida, H. Eisaki, T. Wu, X. H. Chen, D. H. Lu, C. Meingast, T. P. Devereaux, R. J. Birgeneau, M. Hashimoto, D. H. Lee, and Z. X. Shen, Superconducting Fluctuations in Overdoped Bi2Sr2CaCu2O8+δ, Phys. Rev. X 11, 031068 (2021).
[31] S. Di Chen, M. Hashimoto, Y. He, D. Song, J. F. He, Y. F. Li, S. Ishida, H. Eisaki, J. Zaanen, T. P. Devereaux, D. H. Lee, D. H. Lu, and Z. X. Shen, Unconventional Spectral Signature of Tc in a Pure d-Wave Superconductor, Nature 601, 562 (2022).
[32] C. Zou, Z. Hao, X. Luo, S. Ye, Q. Gao, M. Xu, X. Li, P. Cai, C. Lin, X. Zhou, D.-H. Lee, and Y. Wang, Particle–Hole Asymmetric Superconducting Coherence Peaks in Overdoped Cuprates, Nat. Phys. 18, 551 (2022).
[33] D. Wang, J. Xu, H. Zhang, and Q. Wang, Anisotropic Scattering Caused by Apical Oxygen Vacancies in Thin Films of Overdoped High-Temperature Cuprate Superconductors, Phys. Rev. Lett. 128, 137001 (2022).
[34] Z. X. Li, S. A. Kivelson, and D. H. Lee, Superconductor-to-Metal Transition in Overdoped Cuprates, npj Quantum Mater. 6, 36 (2021).
[35] W. Wéi, W. Xiang, and T. André-Marie, Non-Fermi Liquid Phase and Linear-in-Temperature Scattering Rate in Overdoped Two-Dimensional Hubbard Model, Proc. Natl. Acad. Sci. 119, e2115819119 (2022).
[36] J.-J. Dong, D. Huang, and Y. Yang, Mutual Information, Quantum Phase Transition and Phase Coherence in Kondo Systems, Phys. Rev. B 104, L081115 (2021).
[37] A. Mukherjee, N. D. Patel, S. Dong, S. Johnston, A. Moreo, and E. Dagotto, Testing the Monte Carlo-Mean Field Approximation in the One-Band Hubbard Model, Phys. Rev. B 90, 205113 (2014).
[38] S. Liang, A. Moreo, and E. Dagotto, Nematic State of Pnictides Stabilized by Interplay between Spin, Orbital, and Lattice Degrees of Freedom, Phys. Rev. Lett. 111, 047004 (2013).
[39] Y. Dubi, Y. Meir, and Y.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Avishai, Nature of the 10 Superconductor-Insulator Transition in Disordered Su- perconductors, Nature 449, 876 (2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [40] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Pasrija, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Chakraborty, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kumar, Effective Hamiltonian Based Monte Carlo for the BCS to BEC Crossover in the Attractive Hubbard Model, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' B 94, 165150 (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [41] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Karmakar, Pauli Limited d-Wave Superconductors: Quantum Breached Pair Phase and Thermal Transi- tions, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Condens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Matter 32, 405604 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [42] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Cover and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Thomas, Elements of Information Theory (Wiley Series in Telecommunications and Signal Processing) (Wiley-Interscience, USA, 2006).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [43] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kraskov, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' St¨ogbauer, and P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Grassberger, Esti- mating mutual information, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' E 69, 066138 (2004).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [44] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Varanasi, Estimation of the Information by an Adaptive Partitioning of the Observation Space Georges, IEEE Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Inf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Theory 45, 1315 (1999).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [45] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Gao, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ver Steeg, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Galstyan, Efficient Esti- mation of Mutual Information for Strongly Dependent Variables, Proceedings of the 18th International Con- ference on Artificial Intelligence and Statistics, 38, 277 (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [46] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Khan, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Bandyopadhyay, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ganguly, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Saigal, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Erickson, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Protopopescu, and G.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ostrouchov, Relative Performance of Mutual Information Estima- tion Methods for Quantifying the Dependence among Short and Noisy Data, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' E 76, 026209 (2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [47] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Belghazi, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Baratin, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rajeswar, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ozair, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Bengio, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Courville, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Hjelm, Mutual Informa- tion Neural Estimation, Proceedings of the 35th Inter- national Conference on Machine Learning, PMLR 80, 531 (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [48] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Poole, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ozair, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Oord, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Alemi, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Tucker, On Variational Bounds of Mutual Informa- tion, Proceedings of the 36th International Conference on Machine Learning, PMLR 97, 5171 (2019).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [49] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Speed, A Correlation for the 21st Century (Science (2011) (1502)), Science 334, 1502 (2012).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [50] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Reshef, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Reshef, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Finucane, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Grossman, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Mcvean, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Turnbaugh, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lander, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Mitzenmacher, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sabeti, Detecting Novel Associations in Large Data Sets, Science 334, 1518 (2011).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [51] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kinney and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Atwal, Equitability, Mutual Information, and the Maximal Information Coefficient, Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Natl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Acad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 111, 3354 (2014).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [52] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Koch-Janusz and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ringel, Mutual Information, Neural Networks and the Renormalization Group, Nat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 14, 578 (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [53] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Nir, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sela, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Beck, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Bar-Sinai, Machine- Learning Iterative Calculation of Entropy for Physical Systems, Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Natl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Acad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 117, 30234 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [54] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' G¨okmen, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ringel, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Huber, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Koch-Janusz, Statistical Physics through the Lens of Real-Space Mutual Information, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 127, 240603 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [55] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Parisen Toldin, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sato, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Assaad, Mutual Information in Heavy-Fermion Systems, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' B 99, 155158 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [56] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Walsh, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Charlebois, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' S´emon, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sordi, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Tremblay, Information-Theoretic Measures of Super- conductivity in a Two-Dimensional Doped Mott Insu- lator, Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Natl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Acad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sci.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 118, e2104114118 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [57] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Nicoletti and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Busiello, Mutual Information Disentangles Interactions from Changing Environments, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 127, 228301 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [58] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kim, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Choi, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lewandowski, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Thomson, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Zhang, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Polski, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Watanabe, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Taniguchi, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Al- icea, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Nadj-Perge, Evidence for Unconventional Superconductivity in Twisted Trilayer Graphene, Na- ture 606, 494 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [59] P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Monthoux, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Pines, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lonzarich, Supercon- ductivity without Phonons, Nature 450, 1177 (2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [60] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Coleman, Introduction to Many-body Physics, (Cam- bridge University Press, Cambridge, England, 2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [61] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kumar and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Majumdar, A Travelling Cluster Ap- proximation for Lattice Fermions Strongly Coupled to Classical Degrees of Freedom, Eur.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' B 50, 571 (2006).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [62] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Mukherjee, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Patel, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Bishop, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Dagotto, Parallelized Traveling Cluster Approximation to Study Numerically Spin-Fermion Models on Large Lattices, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' E 91, 063303 (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [63] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Carbotte, Properties of a Two-Dimensional D- Wave Superconductor from Phenomenological Suscep- tibility, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' B 49, 4176 (1994).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [64] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Monthoux and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lonzarich, Magnetically Me- diated Superconductivity in Quasi-Two and Three Di- mensions, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' B 63, 054529 (2001).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [65] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Plat´e, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Mottershead, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Elfimov, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Peets, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Liang, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Bonn, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Hardy, S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Chi- uzbaian, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Falub, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Shi, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Patthey, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Dam- ascelli, Fermi Surface and Quasiparticle Excitations of Overdoped Tl2Ba2CuO6+δ, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 95, 077001 (2005).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [66] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Vignolle, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Carrington, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Cooper, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' French, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Mackenzie, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Jaudet, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Vignolles, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Proust, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Hussey, Quantum Oscillations in an Overdoped High-Tc Superconductor, Nature 455, 952 (2008).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [67] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Monthoux, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' V Balatsky, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Pines, Toward a Theory of High-Temperature Superconductivity in the Antiferromagnetically Correlated Cuprate Oxides, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 67, 3448 (1991).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [68] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Li, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Cheng, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Paiva, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content='-Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lin, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Mondaini, Giant Magnetoresistance in Hubbard Chains, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 121, 020403 (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [69] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Harrison, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' McDonald, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Singleton, Cuprate Fermi Orbits and Fermi Arcs: The Effect of Short- Range Antiferromagnetic Order, Phys.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 99, 206406 (2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [70] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Alvarez and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Dagotto, Fermi Arcs in the Supercon- ducting Clustered State for Underdoped Cuprate Super- conductors, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 101, 177001 (2008).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [71] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Greco, Evidence for Two Competing Order Param- eters in Underdoped Cuprate Superconductors from a Model Analysis of Fermi-Arc Effects, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 103, 217001 (2009).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [72] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kanigel, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Norman, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Randeria, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Chatterjee, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Souma, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kaminski, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Fretwell, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Rosenkranz, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Shi, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sato, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Takahashi, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Li, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Raffy, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Kadowaki, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Hinks, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Ozyuzer, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Campuzano, Evolution of the Pseudogap from Fermi Arcs to the Nodal Liquid, Nat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' 2, 447 (2006).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' [73] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Reber, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Plumb, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Sun, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Cao, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' Wang, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/jNFAT4oBgHgl3EQfaR1d/content/2301.08550v1.pdf'} +page_content=' McElroy, H.' 
diff --git a/jdA0T4oBgHgl3EQfIv9Y/content/2301.02079v1.pdf b/jdA0T4oBgHgl3EQfIv9Y/content/2301.02079v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..4f226b3d657f7c328396b24541d571b8d2195e6f
--- /dev/null
+++ b/jdA0T4oBgHgl3EQfIv9Y/content/2301.02079v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bae5c667e136b447c7a5bf825a0ef6b51aea8bb6fbfbc4d82271d9b854e7cb1
+size 4269417
diff --git a/jdA0T4oBgHgl3EQfIv9Y/vector_store/index.faiss b/jdA0T4oBgHgl3EQfIv9Y/vector_store/index.faiss
new file mode 100644
index 0000000000000000000000000000000000000000..073868e61462fdcb944ccb37d794b3317f54d9e0
--- /dev/null
+++ b/jdA0T4oBgHgl3EQfIv9Y/vector_store/index.faiss
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feb837b2b5d6ad26856bcedd175f0621497dc6969c919d64351368e57509d033
+size 2752557
diff --git a/jdA0T4oBgHgl3EQfIv9Y/vector_store/index.pkl b/jdA0T4oBgHgl3EQfIv9Y/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..30521492bba65c9213b7f01e2c217030c78689d0
--- /dev/null
+++ b/jdA0T4oBgHgl3EQfIv9Y/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c4649469a2b475078b65a7ea367c630269d3fb2cb24ddd98997cc7c64d70f11
+size 109420
diff --git a/jdE1T4oBgHgl3EQfNQNY/content/2301.02999v1.pdf b/jdE1T4oBgHgl3EQfNQNY/content/2301.02999v1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..a9dc98699e55cd66ae11cf7e7de1e1a7f57ec03c
--- /dev/null
+++ b/jdE1T4oBgHgl3EQfNQNY/content/2301.02999v1.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c41b1d39b0435da829da8bd136c81dccfd7c8583b1c0006eb83333be51a7bd7
+size 13572610
diff --git a/k9AzT4oBgHgl3EQfNfvd/vector_store/index.pkl b/k9AzT4oBgHgl3EQfNfvd/vector_store/index.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..dc3e38a7fbddc4a921b24b2c033d42e27c0ffcc3
--- /dev/null
+++ b/k9AzT4oBgHgl3EQfNfvd/vector_store/index.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b95994e91fe20c3d6132b388624d78e198eb17ac6d3e6fb466b2ff6a48787515
+size 132029
diff --git a/kNA0T4oBgHgl3EQfI_8Y/content/tmp_files/2301.02082v1.pdf.txt b/kNA0T4oBgHgl3EQfI_8Y/content/tmp_files/2301.02082v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0a36ae703a40160c7125eaf2b78c64c7618c5f87
--- /dev/null
+++ b/kNA0T4oBgHgl3EQfI_8Y/content/tmp_files/2301.02082v1.pdf.txt
@@ -0,0 +1,1264 @@
arXiv:2301.02082v1 [math.GT] 5 Jan 2023

LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS OF COMPLETE GRAPHS

YASMIN AGUILLON, ERIC BURKHOLDER, XINGYU CHENG, SPENCER EDDINS, EMMA HARRELL, KENJI KOZAI, ELIJAH LEAKE, AND PEDRO MORALES

Abstract. A book embedding of a complete graph is a spatial embedding whose planar projection has the vertices located along a circle, consecutive vertices are connected by arcs of the circle, and the projections of the remaining "interior" edges in the graph are straight line segments between the points on the circle representing the appropriate vertices. A random embedding of a complete graph can be generated by randomly assigning relative heights to these interior edges. We study a family of two-component links that arise as the realizations of pairs of disjoint cycles in these random embeddings of graphs. In particular, we show that the distribution of linking numbers can be described in terms of Eulerian numbers. Consequently, the mean of the squared linking number over all random embeddings is i/6, where i is the number of interior edges in the cycles. We also show that the mean of the squared linking number over all pairs of n-cycles in K2n grows linearly in n.

2020 Mathematics Subject Classification. 57M15, 57K10, 05C10.
Key words and phrases. book embeddings of graphs, linking in spatial graphs, Eulerian numbers.
The authors were supported in part by NSF Grant DMS-1852132.

1. Introduction

Random knot models have been used to study the spatial configurations of polymers such as DNA, whose length is 1,000 to 500,000 times the length of the diameter of the nucleus [12]. With such a long molecule confined to a compact space, DNA can become knotted, tangled, or linked. In order for cell replication to occur, DNA must unknot itself with the aid of a special enzyme known as topoisomerase that cuts through the knotted parts of the DNA molecule and reconnects any loose ends, and problems can arise during cellular replication if topoisomerase enzymes do not work properly [14]. By comparing the topological invariants of DNA before and after enzymes act on it, we can learn more about the mechanisms of these enzymes and their effects on the structure of DNA [15]. Because many polymers are too small to image in detail, several authors have used mathematical models to study configurations of long polymer chains by introducing versions of uniform random distributions of polygonal chains in a cube [1, 2, 6, 7, 18, 20, 22]. Even-Zohar et al. introduced a random model based on petal diagrams of knots and links where the distribution of links can be studied in terms of random permutations, achieving an explicit description of the asymptotic distribution for the linking number [11].

Random graph embeddings can be thought of as generalizations of random knot embeddings to molecules with non-linear structures. In [13], a random graph embedding model generalizing the uniform random distributions of polygonal chains in a cube was used to study the behavior of linking numbers and writhe.
In this paper, we study an alternate random embedding model, similar to the Petaluma model in [11] in that the distribution of random embeddings can be described in terms of a random choice of permutations. This model is based on book embeddings of the complete graph Kn. Rowland has classified all possible links that could appear in book embeddings of K6 [21], and we consider the more general case of links in K2n. In particular, we study a special class of two-component links that appear in book embeddings, namely unions of disjoint monotonic cycles, and we describe the behavior of the linking number in terms of the combinatorial properties of the lengths of the cycles and the number of interior edges in the book embedding. We show that the mean value of the squared linking number grows linearly with respect to both quantities in Theorem 10 and Theorem 11.

2. Random book embeddings

Given a graph G, Atneosen [3] and Persinger [19] introduced the notion of a book embedding of G, which is a particular class of spatial embedding of a graph in which the vertices of the graph are placed along a fixed line in R3 called the spine of the book. The edges of G are embedded on half-planes, called sheets, which are bounded by the spine. Classically, the edges are drawn as disjoint circular arcs on their respective sheets. Instead, we will consider the circular diagram for a book embedding of Kn introduced by Endo and Otsuki, in which the spine is a circle consisting of the vertices and the edges between consecutive vertices, the pages are discs bounded by the spine, and the remaining edges are straight lines between vertices of a given page [8, 9].

We focus on book embeddings of the complete graph K2n (or sometimes Km+n) on 2n vertices. In our model, the 2n vertices will be labeled as v1, . . . , v2n in clockwise order around the circular spine. The perimeter of the circle will form the edges between consecutive vertices vj and vj+1 for all j ∈ {1, 2, . . . , 2n}, where the indices are taken modulo 2n. We denote these edges as exterior edges. The remaining $\binom{2n}{2} - 2n$ edges are interior edges, and a book embedding is determined by dividing the interior edges among a finite number of sheets so that no two edges within a page intersect.

In order to generate a random book embedding, we embed each interior edge on its own separate sheet. The ordering of the sheets can then be determined by a random permutation σ of $\{1, \ldots, \binom{2n}{2} - 2n\}$ with the uniform distribution. We can think of the permutation as giving the height order of the sheets, so that edge ei is in a sheet above edge ej if σ(i) > σ(j). Note that a random book embedding will typically be equivalent to a book embedding with far fewer sheets. When edges in two adjacent sheets do not cross in the circular diagram, the two sheets can be combined into a single sheet in which the two edges are embedded without intersecting, obtaining an equivalent embedding with one fewer sheet.
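To make this sampling procedure concrete, the following Python sketch (our own illustration; the function names and the dictionary representation of the height order are hypothetical and not notation from the paper) enumerates the interior edges of a complete graph with vertices labeled 1, . . . , 2n around the spine and assigns them a uniformly random height order, exactly as the permutation σ above orders the sheets.

import itertools
import random

def interior_edges(num_vertices):
    # All edges (i, j), i < j, of the complete graph on vertices 1..num_vertices,
    # excluding the exterior (spine) edges between cyclically consecutive vertices.
    edges = []
    for i, j in itertools.combinations(range(1, num_vertices + 1), 2):
        exterior = (j == i + 1) or (i == 1 and j == num_vertices)
        if not exterior:
            edges.append((i, j))
    return edges

def random_book_embedding(num_vertices):
    # Place each interior edge on its own sheet and choose a uniformly random
    # height order; edge e lies above edge f exactly when heights[e] > heights[f].
    edges = interior_edges(num_vertices)
    order = list(range(1, len(edges) + 1))
    random.shuffle(order)              # a uniform random permutation sigma
    return dict(zip(edges, order))

heights = random_book_embedding(6)     # a random book embedding of K_6
print(len(heights))                    # C(6,2) - 6 = 9 interior edges
print(heights)

Comparing two interior edges by their assigned heights reproduces the relation "ei is in a sheet above ej if σ(i) > σ(j)" used throughout the rest of the paper.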
This gives a quantitative measure +of how interwined the two components are. In an abuse of notation, given two + +LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS +3 +Figure 1. A positive crossing (left) and a negative crossing (right) +> +v1 +v2 +v3 +v4 +v1v2v3v4 +monotonic +> +v1 +v2 +v3 +v4 +v1v3v2v4 +non-monotonic +Figure 2. Monotonic (left) and non-monotonic (right) cycles +oriented cycles P and Q of a graph G and a fixed embedding, we will let ℓ(P ∪ Q) +mean the linking number of the image of the two cycles under the embedding. +We introduce a special class of links in book embeddings of a graph. +Definition 1. Let K2n be a complete graph with vertices enumerated as {v1, . . . , v2n} +in cyclic order along the spine of a book embedding of K2n. An oriented cycle with +consecutive edges {−−−→ +vi1vi2, −−−→ +vi2vi3, . . . , −−−−−→ +vik−1vik, −−−→ +vikvi1} is +(1) strictly increasing if there is a cyclic permutation i′ +1, . . . , i′ +k of i1, . . . , ik such +that i′ +j < i′ +j+1 for all j ∈ {1, 2, . . ., k − 1}. +(2) strictly decreasing if there is a cyclic permutation i′ +1, . . . , i′ +k of i1, . . . , ik such +that i′ +j > i′ +j+1 for all j ∈ {1, 2, . . ., k − 1}. +(3) monotonic if the cycle is either strictly increasing or strictly decreasing. +The 4-cycle on the left in Figure 2 is monotonic because beginning with the +vertex v1, the vertices in the cycle in order are v1, v2, v3, v4, which has strictly +increasing indices. However, the order of the vertices in the 4-cycle on the right is +v1, v3, v2, v4. The indices are not monotonic even up to cyclic permutation, so this +cycle is not monotonic. +Finally, we also introduce the Eulerian numbers, which arise in combinatorics as +coefficients of Eulerian polynomials [4, 10, 16]. +Definition 2. Let σ ∈ Sn be a permutation on {1, . . . , n}. An ascent of the per- +mutation is a value 1 ≤ k ≤ n − 1 such that σ(k) < σ(k + 1). +Definition 3. The Eulerian number A(n, m) is the number of permutations σ ∈ Sn +that have exactly m ascents. +As an example, we have the following exhaustive list of permutations in S3: +(1,2,3); (1,3,2); (2,1,3); (2,3,1); (3,1,2); (3,2,1). + +4 AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES +Among these permutations, (1,2,3) has two ascents, (1,3,2), (2,1,3), (2,3,1), and +(3,1,2) each have one ascent, and (3,2,1) has no ascents. +Hence, A(3, 2) = 1, +A(3, 1) = 4, and A(3, 0) = 1. Note that A(n, n) = 0 for all n > 0. Additionally, +there is always exactly one permutation in Sn with no ascents and exactly one +permutation in Sn with n − 1 descents, which are (n,n − 1,. . . ,1) and (1,2,. . . ,n), +respectively. Hence, A(n, 0) = A(n, n − 1) = 1. +Eulerian numbers are coefficients of Eulerian polynomials, +An(t) = +n +� +m=0 +A(n, m)tm, +where An(t) is recursively defined by the relations, +A0(t) = 1, +An(t) = t(1 − t)A′ +n−1(t) + An−1(t)(1 + (n − 1)t), +for n > 0. +It is also known that +A(n, m) = +m+1 +� +k=0 +(−1)k +�n + 1 +k +� +(m + 1 − k)n, +and the exponential generating function for the Eulerian numbers is +∞ +� +n=0 +∞ +� +m=0 +A(n, m)tm xn +n! = +t − 1 +t − e(t−1)x . +From the definition, it is also evident that for a fixed n, the sum of Eulerian +numbers A(n, m) over all possible values of m gives the number of all permutations, +|Sn|, so that +n +� +m=0 +A(n, m) = n!. +4. Linking numbers of disjoint monotonic cycles +In this paper, we will consider the distribution of linking numbers of two disjoint +monotonic cycles in random book embeddings. 
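Before doing so, it is convenient to have the Eulerian numbers available computationally. The following Python sketch is only a brute-force sanity check for small n (the helper names eulerian_brute and eulerian_formula are ad hoc and not notation used elsewhere in this paper): it counts ascents over all permutations of {1, . . . , n} as in Definitions 2 and 3, compares the result with the explicit alternating-sum formula quoted above, and verifies that each row of the Eulerian triangle sums to n!.

from itertools import permutations
from math import comb, factorial

def eulerian_brute(n, m):
    """Count permutations of {1, ..., n} with exactly m ascents (Definition 3)."""
    count = 0
    for sigma in permutations(range(1, n + 1)):
        ascents = sum(1 for k in range(n - 1) if sigma[k] < sigma[k + 1])
        if ascents == m:
            count += 1
    return count

def eulerian_formula(n, m):
    """Explicit alternating-sum formula for the Eulerian number A(n, m)."""
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

if __name__ == "__main__":
    for n in range(1, 7):
        row = [eulerian_brute(n, m) for m in range(n)]
        assert row == [eulerian_formula(n, m) for m in range(n)]
        assert sum(row) == factorial(n)   # rows of the Eulerian triangle sum to n!
        print(n, row)                     # n = 3 prints [1, 4, 1], matching A(3, 0), A(3, 1), A(3, 2)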
First, note the following fact about +the number of interior edges of two monotonic cycles in a book embedding. +Lemma 4. Two disjoint monotonic cycles of length m and n in a book embedding +of Km+n must have an equal number of interior edges, which is also equal to half +the number of crossings between the two cycles. +Proof. Let P and Q be an m-cycle and n-cycle in a book embedding, respectively, +and suppose that P has i interior edges. Let −−→ +vjvk be an interior edge of P. Then +vk−1 must be a vertex in Q, and there is a smallest h > k such that vh is a vertex in +Q. Then −−−−→ +vk−1vh is an edge in Q which crosses the edge −−→ +vjvk of P. Similarly, there +is an edge −−−−→ +vsvj+1 in Q that crosses −−→ +vjvk, and no other edge in Q can cross −−→ +vjvk. +Hence, the number of crossings between P and Q is twice the number of interior +edges in P. By symmetry, this is also equal to twice the number of interior edges +in Q. +□ +Lemma 4 implies that if P and Q are both n-cycles and P consists of n interior +edges, then all edges in Q must also be interior. We now relate the number of +disjoint cycles with fixed linking number to the Eulerian numbers A(m, n). + +LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS +5 +Theorem 5. Suppose P and Q are both strictly increasing n-cycles in K2n so that +P and Q both consist of n interior edges. The proportion of random book embeddings +of K2n for which P and Q have linking number equal to ℓ is +A(2n − 1, n + ℓ − 1) +(2n − 1)! +. +Proof. Let P and Q be two strictly increasing cycles, each with n interior edges. +Consider a permutation of all of the interior edges of K2n, which determines the +ordering of their respective sheets in a book embedding. As we are only concerned +with the linking number ℓ(P ∪ Q), we only need the relative orderings of the edges +of P and Q in order to resolve the signs of any crossings between interior edges of P +and Q. By designating these edges as e1, . . . , e2n, we may consider the permutation +σ as a permutation of {1, . . ., 2n}. +Without loss of generality, we label the topmost edge of the permutation of +interior edges as edge e2n. Since the edges in the cycle are directed so that the +cycle is strictly increasing, we may begin numbering the vertices of K2n so that the +initial vertex of e2n is vertex v2n. We then number the vertices in cyclic order, so +that the vertex in K2n that lies next in the clockwise direction from v2n is v1, the +following vertex (which is the terminal vertex of e2n) is v2, and so on. The edge +indices will then also be identified with their initial vertex, so that the edge −−→ +v1v3 is +e1, the edge −−→ +v2v4 is e2, and so on, until the edge −−−−−→ +v2n−1v1 is labeled e2n−1 and edge +−−−→ +v2nv2 is labeled e2n. Under this labeled scheme, edge ej will have crossings with +edges ej−1 and ej+1, where indices are taken modulo 2n. +The bijective function σ from {1, . . . , 2n} to itself determines the relative heights +of the edges so that whenever σ(j) > σ(k), then ej is in a sheet above the sheet +containing ek, and whenever σ(j) < σ(k), ej is embedded in a sheet below the sheet +containing ek. Since both cycles are strictly increasing, the sign of the crossing +between edge ej and edge ej+1 can be determined by σ(j) and σ(j + 1). When +σ(j) > σ(j + 1), the sign of the crossing is negative. When σ(j) < σ(j + 1), the +sign of the crossing is positive, as seen in Figure 3. 
Therefore, the linking number +is half the quantity of the number of times σ(j) < σ(j + 1) minus the number of +times σ(j) > σ(j + 1). +By construction, σ(2n) = 2n, so that σ(2n−1) < σ(2n) and σ(2n) > σ(1). Since +this results in exactly one positive crossing and one negative crossing, crossings in- +volving the edge e2n have zero net effect on the linking number. We may ignore +edge 2n in the permutation and consider only a further restriction of the permuta- +tion to a permutation σ′ of {1, . . . , 2n − 1}. Topologically, this can be thought of +as applying a Reidemeister Move 2, sliding the topmost edge away to the exterior +of the binding so that the edge e2n no longer has any crossings with edges e2n−1 +and e1 +Notice that σ′(j) < σ′(j + 1) is the same as an ascent in σ′ and σ′(j) > σ′(j + 1) +is the same as a descent in σ′. So the linking number of P and Q depends on the +number of ascents of the permutation σ′. If σ′ has m ascents, it has 2n − 2 − m +descents, so that the linking number is 1 +2[m− (2n− 2 − m)]. Setting this equal to ℓ, +then m = n + ℓ − 1. Thus, we conclude that the number of permutations in S2n−1 +that lead to a linking number of ℓ is A(2n − 1, n + ℓ − 1). For each permutation +σ′ ∈ S2n−1, there are an equal number of permutations of the edges of K2n that +restrict to σ′, so that the proportion of random book embeddings in which P and + +6 AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES +ej+1 +ej +σ(j) > σ(j + 1) +negative crossing +ej+1 +ej +σ(j) < σ(j + 1) +positive crossing +Figure 3. A negative crossing (left) and a positive crossing (right) +in terms of σ(j) and σ(j + 1) +v4 +v3 +v2 +v1 +v6 +v5 +e6 +e1 +e2 +e3 +e4 +e5 +Figure 4. Solomon’s link as a union of two monotonic 3-cycles in K6. +Q have linking number ℓ is +A(2n − 1, n + ℓ − 1) +(2n − 1)! +. +□ +An example of the connection between ascents, descents, crossing signs, and +linking number is shown in Figure 4 and Table 1. Observe in Table 1 that σ(5) < +σ(6). Thus j = 5 would be an ascent. However, as σ(6) > σ(1), the signed crossing +between e5 and e6 is canceled out with the signed crossing between e6 and e1. +Considering only j = 1 ,2, 3, 4 we are left with four descents, which lead to four +negative crossings and a linking number of −2. + +LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS +7 +j +σ(j) +crossing of ej and ej+1 +ascent or descent +1 +5 +− +descent +2 +4 +− +descent +3 +3 +− +descent +4 +2 +− +descent +5 +1 ++ +ascent +6 +6 +− +Table 1. Signed crossings and ascents/descents in height function +σ for the example in Figure 4. +We remark that the results from Theorem 5 extend to the more general case +of two monotonic cycles of length m and n with i interior edges each. The sign +of the linking number will flip whenever we reverse the orientation of one of the +cycles, so if we have two monotonic cycles P and Q of length n which are not +necessarily strictly increasing, this would result in replacing ℓ with −ℓ in the result +of Theorem 5. However, the Eulerian numbers have the symmetry property that +A(n, m) = A(n, n − 1 − m), so that A(2n − 1, n − ℓ − 1) = A(2n − 1, n + ℓ − 1). +This results in an identical proportion of book embeddings in which the cycles +have linking number ℓ, thus whether the cycles are strictly increasing or strictly +decreasing has no net effect on the distribution of linking numbers as long as they +are both monotonic. 
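For small n, Theorem 5 can also be checked by direct enumeration: assign the heights 1, . . . , 2n to the interior edges e1, . . . , e2n in every possible order, read off the sign of each crossing between ej and ej+1 from σ(j) and σ(j + 1) as in Figure 3, and tally the resulting linking numbers. The Python sketch below is such a brute-force check (exact rational arithmetic; the helper names are ad hoc); for n = 3 it recovers the proportions 66/120 for ℓ = 0, 26/120 for each of ℓ = ±1, and 1/120 for each of ℓ = ±2.

from itertools import permutations
from fractions import Fraction
from math import comb, factorial

def eulerian(n, m):
    # Explicit formula for the Eulerian number A(n, m).
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

def linking_number(sigma):
    """Linking number of two strictly increasing n-cycles whose 2n interior edges
    e_1, ..., e_2n receive heights sigma(1), ..., sigma(2n); e_j crosses e_{j+1}
    (indices mod 2n) with the sign convention of Figure 3."""
    m = len(sigma)
    signs = sum(1 if sigma[j] < sigma[(j + 1) % m] else -1 for j in range(m))
    return signs // 2          # the signed sum is always even

def empirical_distribution(n):
    counts = {}
    for sigma in permutations(range(1, 2 * n + 1)):
        ell = linking_number(sigma)
        counts[ell] = counts.get(ell, 0) + 1
    total = factorial(2 * n)
    return {ell: Fraction(c, total) for ell, c in sorted(counts.items())}

if __name__ == "__main__":
    n = 3   # two 3-cycles with 3 interior edges each, as in Figure 4
    for ell, p in empirical_distribution(n).items():
        predicted = Fraction(eulerian(2 * n - 1, n + ell - 1), factorial(2 * n - 1))
        print(ell, p, predicted)
        assert p == predicted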
+In the case where P and Q have lengths m and n, respectively, Lemma 4 states +that both P and Q have the same number of interior edges, which we will denote by +i. Contracting Km+n along all of the exterior edges in P and Q does not alter the +topological type of the link P ∪ Q, and the proportion of random book embeddings +of Km+n for which the linking number of P ∪ Q is equal to ℓ will be the same as +the proportion of book embeddings of the contracted graph K′ in which the linking +number of P ∪ Q is equal to ℓ by a similar argument as in Theorem 5. Hence, we +arrive at the following when i ≥ 3. +Corollary 6. Let P and Q be monotonic cycles of length m and n, respectively, in +Km+n. The proportion of random book embeddings of Km+n in which the linking +number of P ∪ Q is equal to ℓ is +A(2i − 1, i + ℓ − 1) +(2i − 1)! +, +where i ≥ 2 is the number of interior edges of both P and Q. +The exceptional case when i = 2 can be verified to follow the same formula as in +Corollary 6 by contracting to two 3-cycles with two interior edges and one exterior +edge each, then applying the argument in Theorem 5 to the interior edges only. +Table 2 gives the values of A(2i − 1, i + ℓ − 1) for 1 ≤ i ≤ 5. The proportion of +random book embeddings for which two cycles with i interior edges have a linking +number of ℓ can be obtained by dividing the entries by (2i − 1)!. +The following theorem describes the number of disjoint m- and n-cycles with a +given number of interior edges. In combination with the previous corollary, this will +allow for calculation of the frequency with which a random m-cycle P and disjoint +n-cycle Q has linking number ℓ in a random book embedding of Km+n. + +8 AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES +i\ℓ +-5 +-4 +-3 +-2 +-1 +0 +1 +2 +3 +4 +5 +1 +1 +2 +1 +4 +1 +3 +1 +26 +66 +26 +1 +4 +1 +120 +1191 +2416 +1191 +120 +1 +5 +1 +502 +14608 +88234 +156190 +88234 +14608 +502 +1 +Table 2. Values of A(2i − 1, i + ℓ − 1) +Theorem 7. Let m, n ≥ 3. Then the number of disjoint (undirected) monotonic +cycles P and Q in a book embedding of Km+n so that P is an m-cycle and Q is a +n-cycle, each with 2 ≤ i ≤ min{m, n} interior edges is +� m +m − i +��n − 1 +n − i +� ++ +� n +n − i +��m − 1 +m − i +� +, +if m ̸= n. In the case that m = n, the number of disjoint cycles is +� n +n − i +��n − 1 +n − i +� +. +Proof. Fix a labeling of the vertices of Km+n in cyclic order v1, . . . , vm+n. Suppose +P is a m-cycle and Q is a n-cycle. +First, suppose P contains v1. If P has i interior edges, there are +�m +i +� +ways to +choose which of the m edges in P are interior edges. For each of the i chosen edges +in P, in order for it to be interior, there must be a vertex in the cycle Q lying +between the initial and terminal vertices of the edge in P. Moreover, for each of +the external edges in the cycle P, there cannot be any vertices of Q lying between +the initial and terminal vertices. This create i areas in which the vertices of Q must +be located, one between the initial and terminal vertices of each internal edge in +P, with each containing at least one vertex. A stars and bars argument, in which +there are n − i vertices of Q to allocate after placing one vertex of Q into each of +the i spots, and i − 1 bars to separate the i spots, leads to +�n−1 +n−i +� +ways of choosing +the vertices of Q. This results in +� m +m−i +��n−1 +n−i +� +choices of P and Q so that P contains +v1 and both cycles have i interior edges. 
+By an analogous argument, there are +� n +n−i +��m−1 +m−i +� +ways to choose P and Q so +that Q contains v1, completing the proof when m ̸= n. +If m = n, there is no distinction between the cases when v1 is in P and v1 is in +Q. +□ +The number of disjoint n cycles in K2n with i interior edges is tabulated in Table +3 for 3 ≤ n ≤ 10. +The values +� n +n−i +��n−1 +n−i +� +appear as OEIS sequence A103371 [17] up to a shift in +indices due to the cyclic symmetry in the circular diagrams of book embeddings. +The sum over all i gives the number of ways to choose two disjoint monotonic n- +cycles in K2n. An undirected monotonic cycle is determined by the vertices in the +cycles, so this amounts to choosing two disjoint subsets of n vertices from the 2n +vertices in K2n. The number of ways in which this choice can be made is given by +�2n−1 +n−1 +� += +�2n−1 +n +� +. +Combining Theorem 7 with Theorem 5 yields the following corollary. + +LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS +9 +n \ i +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +3 +3 +6 +1 +4 +4 +18 +12 +1 +5 +5 +40 +60 +20 +1 +6 +6 +75 +200 +150 +30 +1 +7 +7 +126 +525 +700 +315 +42 +1 +8 +8 +196 +1176 +2450 +1960 +588 +56 +1 +9 +9 +288 +2352 +7056 +8820 +4704 +1008 +72 +1 +10 +10 +405 +4320 +17640 +31752 +26460 +10080 +1620 +90 +1 +Table 3. Number of pairs of monotonic n-cycles each with i in- +terior edges in K2n. +−6 +−4 +−2 +0 +2 +4 +6 +0 +0.2 +0.4 +0.6 +0.8 +Linking Number +Proportion of links +K6 +K8 +K10 +K12 +Figure 5. Proportion of disjoint pairs of n-cycles with a given +linking number in a random book embedding of K2n. +Corollary 8. The proportion of links P ∪ Q with linking number ℓ among pairs of +n-cycles P and Q in a random book embedding of K2n is +n +� +i=1 +A(2i − 1, ℓ + i − 1) +(2i − 1)! +� n +n − i +��n − 1 +n − i +� +�2n − 1 +n − 1 +� +. +The values from Corollary 8 for n = 3, 4, 5, and 6 are computed and illustrated +in Figure 5. Notice that for two n-cycles in K2n, the maximum number of crossings +that can appear is 2n, meaning that an upper bound for the absolute value of the +linking number is n. Thus, we can normalize the linking number of two monotonic +cycles by dividing by n. The distribution of links with a given normalized linking +number when n = 100, 200, 500, and 1000, are shown in Figure 6. As n increases, +the proportion of links with linking number 0 decreases. However, this behavior +is misleading as links are distributed among a larger range of possible values for + +10AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES +−1 +−0.5 +0 +0.5 +1 +0 +5 · 10−2 +0.1 +0.15 +0.2 +K100 +K200 +K500 +K1000 +Figure 6. Proportion of links with specified normalized linking +number for two monotonic n-cycles in a random book embedding +of K2n +−1 +−0.5 +0 +0.5 +1 +0 +0.2 +0.4 +0.6 +K100 +K200 +K500 +K1000 +Figure 7. Density of links with specified normalized linking num- +ber for two monotonic n-cycles in a random book embedding of +K2n +the linking number as n increases. Normalizing the graph to a density plot as in +Figure 7 gives a very different picture of the behavior of linking numbers of disjoint +n-cycles in random book embeddings of K2n. As the number of vertices increases, +the normalized linking numbers tend closer to 0 as n increases. This model behaves +differently from other models where the mean squared linking number grows as +θ(n2), as in [1, 2, 18]). 
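The proportions plotted in Figures 5–7 can be reproduced exactly from Corollary 8 together with the pair counts from Theorem 7. A short Python sketch follows (exact rational arithmetic; link_proportion and eulerian are ad hoc helper names); for n = 3 it returns 151/200 at ℓ = 0, 73/600 at each of ℓ = ±1, and 1/1200 at each of ℓ = ±2.

from fractions import Fraction
from math import comb, factorial

def eulerian(n, m):
    # Explicit formula; returns 0 when m lies outside 0, ..., n - 1, as needed below.
    if m < 0:
        return 0
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

def link_proportion(n, ell):
    """Corollary 8: proportion of pairs of monotonic n-cycles in a random book
    embedding of K2n whose linking number equals ell."""
    total = Fraction(0)
    for i in range(1, n + 1):
        pairs_with_i = comb(n, n - i) * comb(n - 1, n - i)        # Theorem 7 with m = n
        prob = Fraction(eulerian(2 * i - 1, i + ell - 1), factorial(2 * i - 1))
        total += pairs_with_i * prob
    return total / comb(2 * n - 1, n - 1)

if __name__ == "__main__":
    for n in range(3, 7):   # the cases plotted in Figure 5
        dist = {ell: link_proportion(n, ell) for ell in range(-n, n + 1)}
        assert sum(dist.values()) == 1
        print(n, {ell: str(p) for ell, p in dist.items() if p != 0})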
+In fact, using the exponential generating function for the Eulerian numbers, we +can determine an explicit formula for the mean squared linking number in terms + +LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS +11 +of the number of interior edges i. We will need the following fact from differential +calculus. +Lemma 9. Let g(x) = +xn +(1−x)m . Then for k ≥ 1, g(k)(0) = k! +�k−n+m−1 +m−1 +� +. +Proof. For |x| < 1, we can express +1 +1−x as the power series +1 +1 − x = x0 + x1 + x2 + x3 + . . . . +Then, +g(x) = xn(x0 + x1 + x2 + x3 + . . . )m, +so that g(k)(0) +k! +is the coefficient of xk in the power series expansion of g(x). This is +the xk−n coefficient of (x0 + x1 + x2 + x3 + . . . )m, which is the number of ways to +choose m non-negative integers that add up to k − n. A stars and bars argument +counts this as +�k−n+m−1 +m−1 +� +, with this binomial coefficient defined to be 0 if k < n. +□ +We are now ready to show that the mean squared linking number of two disjoint +cycles grows linearly in the number of interior edges i. Heurestically, this means +that we expect that the linking number grows roughly as the square root of the +number of internal edges. +Theorem 10. Let P ∪ Q be a union of disjoint n cycles with i interior edges each. +Then the mean squared linking number of P ∪ Q in a random book embedding is i +6. +Proof. The exponential generating function for the Eulerian numbers is +∞ +� +n=0 +∞ +� +m=0 +A(n, m)tm xn +n! = +t − 1 +t − e(t−1)x . +Multiplying both sides by t−i+1, we arrive at, +∞ +� +n=0 +∞ +� +m=0 +A(n, m)tm−i+1 xn +n! = t−i+1(t − 1) +t − e(t−1)x . +Notice that differentiating the left-hand side twice with respect to t and taking the +limit as t → 1 yields +∞ +� +n=0 +∞ +� +m=0 +� +(m − i + 1)2 − (m − i + 1) +� +A(n, m)xn +n! . +Differentiating this expression 2i−1 times with respect to x and evaluating at x = 0 +results in +∞ +� +m=0 +(m − i + 1)2A(2i − 1, m) − (m − i + 1)A(2i − 1, m). +After a substitution of ℓ = m − i + 1, this becomes +i−1 +� +ℓ=−i+1 +A(2i − 1, i + ℓ − 1)ℓ2 − A(2i − 1, i + ℓ − 1)ℓ = +i−1 +� +ℓ=−i+1 +A(2i − 1, i + ℓ − 1)ℓ2 += (2i − 1)!E[ℓ(P ∪ Q)2], +as the symmetry in the Eulerian triangle means that the expected value of the +linking number is 0. Hence, the second part of the summation vanishes. + +12AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES +We now repeat the differentiation on the exponential generating function to find +an equivalent expression utilizing logarithmic differentiation. We set f(t, x) to be +the exponential generating function, +f(t, x) = t−i+1(t − 1) +t − e(t−1)x , +and first compute using L’Hˆopital’s rule, +lim +t→1 f(t, x) = 1 · lim +t→1 +t − 1 +t − e(t−1)x = lim +t→1 +1 +1 − xe(t−1)x = +1 +1 − x. +Using logarithmic differentiation, we find that, +ft(t, x) +f(t, x) = −i + 1 +t ++ +1 +t − 1 − 1 − xe(t−1)x +t − e(t−1)x += −i + 1 +t ++ (t − e(t−1)x) − (t − 1)(1 − xe(t−1)x) +(t − 1)(t − e(t−1)x) += −i + 1 +t ++ 1 − e(t−1)x + (t − 1)xe(t−1)x +(t − 1)(t − e(t−1)x) +. +Taking the limit as t → 1 using L’Hˆopital’s rule twice, we obtain, +lim +t→1 +ft(t, x) +f(t, x) = (−i + 1) + lim +t→1 +(t − 1)x2e(t−1)x +(t − e(t−1)x) + (t − 1)(1 − xe(t−1)x) += (−i + 1) + lim +t→1 +x2e(t−1)x + (t − 1)x3e(t−1)x +1 − xe(t−1)x + 1 − xe(t−1)x + (t − 1)(−x2e(t−1)x) += (−i + 1) + x2 +2 · +1 +1 − x. 
The second derivative of log f(t, x) is

\frac{f_{tt}(t, x)}{f(t, x)} − \left(\frac{f_t(t, x)}{f(t, x)}\right)^2 = −\frac{−i + 1}{t^2} − \frac{1}{(t − 1)^2} + \frac{x^2 e^{(t−1)x}}{t − e^{(t−1)x}} + \frac{(1 − x e^{(t−1)x})^2}{(t − e^{(t−1)x})^2}
= −\frac{−i + 1}{t^2} + \frac{−(t − e^{(t−1)x})^2 + (t − 1)^2\left[(t − e^{(t−1)x}) x^2 e^{(t−1)x} + (1 − x e^{(t−1)x})^2\right]}{(t − 1)^2 (t − e^{(t−1)x})^2}.

Taking the limit as t → 1 using L'Hôpital's rule four times yields

\lim_{t → 1} \left[\frac{f_{tt}(t, x)}{f(t, x)} − \left(\frac{f_t(t, x)}{f(t, x)}\right)^2\right] = −(−i + 1) + \frac{x^3}{3} · \frac{1}{(1 − x)^2} − \frac{x^4}{12} · \frac{1}{(1 − x)^2}.

We can then find

\lim_{t → 1} f_{tt}(t, x) = \lim_{t → 1} f(t, x)\left[\frac{f_{tt}(t, x)}{f(t, x)} − \left(\frac{f_t(t, x)}{f(t, x)}\right)^2 + \left(\frac{f_t(t, x)}{f(t, x)}\right)^2\right] = \frac{i(i − 1)}{1 − x} + \frac{(−i + 1)x^2}{(1 − x)^2} + \left(\frac{x^3}{3} + \frac{x^4}{6}\right)\frac{1}{(1 − x)^3}.

By Lemma 9, the (2i − 1)-th derivative in x evaluated at x = 0 is

(2i − 1)!\left[i(i − 1) + (−i + 1)(2i − 2) + \frac{1}{3}\binom{2i − 2}{2} + \frac{1}{6}\binom{2i − 3}{2}\right] = (2i − 1)!\left[(i − 1)(−i + 2) + \frac{(2i − 2)(2i − 3)}{6} + \frac{(2i − 3)(2i − 4)}{12}\right] = (2i − 1)!\,\frac{i}{6}.

Hence,

(2i − 1)!\, E[ℓ(P ∪ Q)^2] = (2i − 1)!\,\frac{i}{6},

completing the proof of the theorem. □

Using Theorem 10, we can find the asymptotic behavior of the mean squared linking number over all pairs of disjoint n-cycles in K2n. Recall that a function f(n) is in order θ(n) if there are positive constants a, A, and N such that an ≤ f(n) ≤ An for all n > N.

Theorem 11. Let n ≥ 3. Then the mean squared linking number of two cycles P and Q taken over all pairs of disjoint n-cycles across all random book embeddings of K2n is in order θ(n).

Proof. By combining Theorem 7 and Theorem 10 and summing over the number of interior edges, the mean squared linking number is

\frac{1}{\binom{2n − 1}{n − 1}} \sum_{i=2}^{n} \binom{n}{n − i}\binom{n − 1}{n − i} \frac{i}{6}.

Since

i\binom{n}{n − i} = i\binom{n}{i} = n\binom{n − 1}{i − 1},

this becomes

(1) \frac{1}{\binom{2n − 1}{n − 1}} \sum_{i=2}^{n} \frac{n}{6}\binom{n − 1}{i − 1}^2 = \frac{n}{6} · \frac{1}{\binom{2n − 1}{n − 1}} \sum_{i=2}^{n} \binom{n − 1}{i − 1}^2.

Using Vandermonde's identity, the summation part of the right-hand side becomes

\sum_{i=2}^{n} \binom{n − 1}{i − 1}^2 = \left[\sum_{i=0}^{n − 1} \binom{n − 1}{i}^2\right] − \binom{n − 1}{0}^2 = \binom{2n − 2}{n − 1} − 1.

Thus, Equation (1) yields

\frac{n}{6} · \frac{1}{\binom{2n − 1}{n − 1}} \left[\binom{2n − 2}{n − 1} − 1\right] = \frac{n}{6}\left[\frac{n}{2n − 1} − \frac{1}{\binom{2n − 1}{n − 1}}\right].

For an upper bound, we have

\frac{n}{6}\left[\frac{n}{2n − 1} − \frac{1}{\binom{2n − 1}{n − 1}}\right] ≤ \frac{n}{6} · \frac{n}{2n − 1} ≤ \frac{n}{6}.

For a lower bound, we note that if n ≥ 3,

\binom{2n − 1}{n − 1} = \frac{2n − 1}{1} · \frac{2n − 2}{2} · · · · · \frac{n + 1}{n − 1} · \frac{n}{n} ≥ (2n − 1)(n − 1) ≥ 2(2n − 1).

Hence,

\frac{n}{6}\left[\frac{n}{2n − 1} − \frac{1}{\binom{2n − 1}{n − 1}}\right] ≥ \frac{n}{6}\left[\frac{n}{2n − 1} − \frac{1}{2(2n − 1)}\right] = \frac{n}{6} · \frac{n − \frac{1}{2}}{2n − 1} = \frac{n}{6} · \frac{1}{2} = \frac{n}{12}. □

[Figure 8. Mean squared linking number of two disjoint n-cycles in a random book embedding of K2n.]

Sample calculations of the mean squared linking number of two n-cycles in K2n can be seen to asymptotically approach n/12, as seen from the nearly linear relationship between n and the mean squared linking number in Figure 8. When n = 100 and n = 1000, the approximate value of the mean squared linking number can be computed from the summation formula in Theorem 11 to be ≈ 8.37521 and ≈ 83.375, respectively.
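These sample values, and the bounds from the proof of Theorem 11, are easy to reproduce from the closed form \frac{n}{6}\left[\frac{n}{2n − 1} − \frac{1}{\binom{2n − 1}{n − 1}}\right] derived above. The short Python sketch below (mean_squared_linking_number is an ad hoc helper name) checks that n = 100 and n = 1000 give approximately 8.37521 and 83.375, and that the value lies between n/12 and n/6 for every n ≥ 3 tested.

from fractions import Fraction
from math import comb

def mean_squared_linking_number(n):
    """Closed form from the proof of Theorem 11 for pairs of disjoint n-cycles in K2n."""
    return Fraction(n, 6) * (Fraction(n, 2 * n - 1) - Fraction(1, comb(2 * n - 1, n - 1)))

if __name__ == "__main__":
    for n in (3, 10, 100, 1000):
        value = mean_squared_linking_number(n)
        assert Fraction(n, 12) <= value <= Fraction(n, 6)   # the bounds established in Theorem 11
        print(n, float(value))   # n = 100 gives ~8.37521 and n = 1000 gives ~83.375, as stated above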
5. Links in random book embeddings of K6

In this section, we consider the special case of random book embeddings of K6. Rowland has studied all possible topological types of book embeddings of K6, showing that the set of non-trivial knots and links that appear are the trefoil knot, the figure-eight knot, the Hopf link, and the Solomon's link [21]. Any two-component link in K6 must consist of two disjoint 3-cycles, and every 3-cycle is necessarily monotonic. Moreover, the trivial link has linking number 0, the Hopf link has linking number ±1, and the Solomon's link (shown in Figure 4) has linking number ±2. Hence, we can utilize Theorems 7 and 10 in the case that n = 3 to determine the probabilities of each type of link occurring in a random book embedding.

We separately consider the cases when the number of interior edges in the 3-cycles is i = 1, 2, and 3, as in Figure 9, and determine the probability of each type of link occurring in each case. We can then combine these with the counts in Table 3 to compute the overall probability that a randomly selected two-component link is either trivial, a Hopf link, or a Solomon's link.

[Figure 9. Projections of two 3-cycles in K6 with i = 1 (left), i = 2 (middle), and i = 3 (right) interior edges.]

When i = 1, it is evident that since the projection of the two cycles has no crossings, the two-component link is trivial.

When i = 2, Table 2 implies that the probability that the two cycles form the Hopf link is p2 = 1/3, and the probability that the two cycles form the trivial link is 1 − p2 = 2/3.

When i = 3, Table 2 implies that the probability that the two cycles form the Solomon's link is q3 = 1/60, the probability that the two cycles form the Hopf link is p3 = 13/30, and the probability that the two cycles form the trivial link is 1 − p3 − q3 = 11/20.

Table 3 details the frequency with which each of the 10 pairs of disjoint 3-cycles in K6 has 1, 2, or 3 interior edges. From this, we determine that the probability that a randomly chosen pair of disjoint 3-cycles in a random book embedding of K6 is trivial is

\frac{1}{10}\left[3 · 1 + 6 · \frac{2}{3} + 1 · \frac{11}{20}\right] = \frac{151}{200}.

Similarly, the probability that a randomly chosen pair of disjoint 3-cycles in a random book embedding of K6 is the Hopf link is

\frac{1}{10}\left[3 · 0 + 6 · \frac{1}{3} + 1 · \frac{13}{30}\right] = \frac{73}{300}.

Finally, the probability that a randomly chosen pair of disjoint 3-cycles in a random book embedding of K6 is the Solomon's link is

\frac{1}{10}\left[3 · 0 + 6 · 0 + 1 · \frac{1}{60}\right] = \frac{1}{600}.

Since K6 contains 10 distinct disjoint pairs of 3-cycles, this implies that in a random book embedding of K6, the expected number of trivial links is 151/20, the expected number of Hopf links is 73/30, and the expected number of Solomon's links is 1/60. It is a classical result in spatial graph theory that every embedding of K6 contains at least one non-trivial link [5]. In a random book embedding of K6, the expected number of non-trivial links is 49/20, with nearly all of the non-trivial links represented by Hopf links.

6. Acknowledgments

The authors would like to thank the National Science Foundation for supporting this work. This research was partially supported by National Science Foundation Grant DMS-1852132.
+In addition, the authors would like to thank the Department of Mathematics +at Rose-Hulman Institute of Technology for their hospitality and for hosting the + +16AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES +Rose-Hulman Institute of Technology Mathematics Research Experience for Un- +dergraduates, where most of this work was completed. +References +[1] J. Arsuaga, B. Borgo, Y. Diao, and R. Scharein. The growth of the mean average crossing +number of equilateral polygons in confinement. J. Phys. A, 42(46):465202, 9, 2009. +[2] Javier Arsuaga, T Blackstone, Yuanan Diao, E Karadayi, and M Saito. Linking of uniform +random polygons in confined spaces. Journal of Physics A: Mathematical and Theoretical, +40:1925, 02 2007. +[3] Gail Atneosen. One-dimensional n-leaved continua. Fundamenta Mathematicae, 74:43–45, +1972. +[4] Louis Comtet. Advanced Combinatorics: The Art of Finite and Infinite Expansions, chapter 6. +D. Reidel Publishing Company, Boston, USA, revised and enlarged edition, 1974. +[5] J. H. Conway and C. McA. Gordon. Knots and links in spatial graphs. J. Graph Theory, +7(4):445–453, 1983. +[6] Y. Diao, C. Ernst, S. Saarinen, and U. Ziegler. Generating random walks and polygons with +stiffness in confinement. J. Phys. A, 48(9):095202, 19, 2015. +[7] Y. Diao, N. Pippenger, and D.W. Sumners. On random knots. In Random knotting and +linking (Vancouver, BC, 1993), volume 7 of Ser. Knots Everything, pages 187–197. World +Sci. Publ., River Edge, NJ, 1994. +[8] Toshiki Endo and Takashi Otsuki. Notes on spatial representations of graphs. Hokkaido Math. +J., 23(3):383–398, 1994. +[9] Toshiki Endo and Takashi Otsuki. Knots and links in certain spatial complete graphs. J. +Comb. Theory. Ser. B, 68:23–35, 1996. +[10] Leonhard Euler. Foundations of Differential Calculus. Springer Science and Business Media, +2000. +[11] Chaim Even-Zohar, Joel Hass, Nati Linial, and Tahl Nowik. Invariants of random knots and +links. Discrete & Computational Geometry, 56(2):274–314, Jun 2016. +[12] Erica Flapan. Knots, Molecules and the Universe: An Introduction to Topology. American +Mathematical Society, United States of America, 2016. +[13] Erica Flapan and Kenji Kozai. Linking number and writhe in random linear embeddings of +graphs. J. Math. Chem., 54(5):1117–1133, 2016. +[14] S. Harrell M. Beals, L. Gross. Dna and knot theory. TIEM, 1999, Online. +[15] R. Mishra and S. Bhushan. Knot theory in understanding proteins. J. Math. Biol., 65:1187– +1213, 2012. +[16] OEIS Foundation Inc. A008292 - OEIS. https://oeis.org/A008292. Accessed: 2022-10-18. +[17] OEIS Foundation Inc. A103371 - OEIS. https://oeis.org/A103371. Accessed: 2022-10-18. +[18] E. Panagiotou, K. C. Millett, and S. Lambropoulou. The linking number and the writhe of +uniform random walks and polygons in confined spaces. J. Phys. A, 43(4):045208, 28, 2010. +[19] C.A. Persinger. Subsets of n-books in E3. Pac. J. Math., 18(1):169–173, 1966. +[20] J. Portillo, Y. Diao, R. Scharein, J. Arsuaga, and M. Vazquez. On the mean and variance of +the writhe of random polygons. J. Phys. A, 44(27):275004, 19, 2011. +[21] D. Rowland. Classification of book representations of K6. Journal of Knot Theory and Its +Ramifications, 26(12):1–26, 2017. +[22] K. Tsurusaki and T. Deguchi. Numerical analysis on topological entanglements of random +polygons. In Statistical models, Yang-Baxter equation and related topics, and Symmetry, +statistical mechanical models and applications (Tianjin, 1995), pages 320–329. World Sci. +Publ., River Edge, NJ, 1996. 
Department of Mathematics, University of Notre Dame, 255 Hurley Bldg, Notre Dame, IN 46556 USA
Email address: yaguillo@nd.edu
Department of Mathematics, University of Kentucky, 719 Patterson Office Tower, Lexington, KY 40506 USA
Email address: ebu241@uky.edu
Department of Mathematics, University of North Carolina at Chapel Hill, 120 E Cameron Avenue, Chapel Hill, NC 27599 USA
Email address: xcheng1@unc.edu
University of Kentucky, Lexington, KY 40506
Email address: spencer.eddins@uky.edu
Mount Holyoke College, 50 College Street, South Hadley, MA 01075 USA
Email address: harre22e@mtholyoke.edu
Department of Natural Sciences and Mathematics, Lesley University, 29 Everett Street, Cambridge, MA 02138 USA
Email address: kkozai@lesley.edu
DePaul University, 1 E. Jackson Boulevard, Chicago, IL 60604 USA
Email address: ejohnj247@gmail.com
Department of Mathematics, Purdue University, 150 N. University Street, West Lafayette, IN 47907 USA
Email address: moralep@purdue.edu
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Consequently, the mean of the squared linking number over all random embeddings is i 6, where i is the number of interior edges in the cycles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We also show that the mean of the squared linking number over all pairs of n-cycles in K2n grows linearly in n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Introduction Random knot models have been used to study the spatial configurations of poly- mers such as DNA, whose length is 1,000 to 500,000 times the length of the diameter of the nucleus [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' With such a long molecule confined to a compact space, DNA can become knottted, tangled, or linked.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In order for cell replication to occur, DNA must unknot itself with the aid of a special enzyme known as topoisomarase that cuts through the knotted parts of the DNA molecule and reconnects any loose ends, and problems can arise during cellular replication if topoisomarase enzymes do not work properly [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' By comparing the topological invariants of DNA before and after enzymes act on it, we can learn more about mechanisms of these enzymes and their effects on the structure of DNA [15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Because many polymers are too small to image in detail, several authors have used mathematical models to study configurations of long polymer chains by introducing versions of uniform random distributions of polygonal chains in a cube [1, 2, 6, 7, 18, 20, 22].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Even-Zohar, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' introduced a random model based on petal diagrams of knots and links where the distribution of links can be studied in terms of random permutations, achieving an explicit description of the asymptotic distribution for the linking number [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Random graph embeddings can be thought of as generalizations of random knot embeddings to molecules with non-linear structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In [13], a random graph em- bedding model generalizing the uniform random distributions of polygonal chains in a cube was used study the behavior of linking numbers and writhe.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In this paper, 2020 Mathematics Subject Classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' 57M15, 57K10, 05C10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Key words and phrases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' book embeddings of graphs, linking in spatial graphs, Eulerian numbers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The authors were supported in part by NSF Grant DMS-1852132.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' 1 2 AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES we study an alternate random embedding model similar to the Petaluma model in [11] in that the distribution of random embeddings can be described in terms of a random choice of permutations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' This model is based on book embeddings of the complete graph Kn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Rowland has classified all possible links that could appear in book embeddings of K6 [21], and we consider the more general case of links in K2n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In particular, we study a special class of two-component links that appear in book embedding which are unions of disjoint monotonic cycles, and we describe the behavior of the linking number in terms of the combinatorial properties of the length of the cycles and the number of interior edges in the book embedding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We show that the mean value of the squared linking number grows linearly with respect to both quantitites in Theorem 10 and Theorem 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Random book embeddings Given a graph G, Atneosen [3] and Persinger [19] introduced the notion of a book embedding of G, which is a particular class of spatial embedding of a graph in which the vertices of the graph are placed along a fixed line in R3 called the spine of the book.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The edges of G are embedded on half-planes, called sheets, which are bounded by the spine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Classically, the edges are drawn as disjoint circular arcs on their respective sheets.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Instead, we will consider the circular diagram for a book embedding of Kn introduced by Endo and Otsuki in which the spine is a circle consisting of the vertices and edges between consecutive vertices, the pages are discs bounded by the spine, and the remaining edges are straight lines between vertices of a given page [8, 9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We focus on book embeddings of the complete graph K2n (or sometimes Km+n) on 2n verticles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In our model, the 2n vertices will be labeled as v1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , v2n in clockwise order around the circular spine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The perimeter of the circle will form the edges between consecutive vertices vj and vj+1 for all j ∈ {1, 2, · · · , 2n}, where the indices are taken modulo 2n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We denote these edges as exterior edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The remaining �2n 2 � − 2n edges are interior edges, and a book embedding is determined by dividing the interior edges among a finite number of sheets so that no two edges within a page intersect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In order to generate a random book embedding, we embed each interior edge on its own separate sheet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The ordering of sheets can then be determined by a random permutation σ of {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , �2n 2 � − 2n} with the uniform distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We can think of the permutation as giving the height order of the sheets, so that edge ei is in a sheet above edge ej if σ(i) > σ(j).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Note that a random book embedding will typically be equivalent to a book embedding with far fewer sheets.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' When edges in two adjacent sheets do not cross in a circular diagram, the two sheets can be combined to a single sheet in which the two edges are embedded without intersecting, obtaining an equivalent embedding with one fewer sheet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Preliminary definitions The image of two disjoint cycles in a graph G under an embedding forms a two- component link.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We can compute the linking number of any oriented link L in R3 by considering the signed crossings of the two components in a planar projection with the rule indicated in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We will denote half of the sum of the signed crossings as the linking number ℓ(L) of a link L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' This gives a quantitative measure of how interwined the two components are.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In an abuse of notation, given two LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS 3 Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' A positive crossing (left) and a negative crossing (right) > v1 v2 v3 v4 v1v2v3v4 monotonic > v1 v2 v3 v4 v1v3v2v4 non-monotonic Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Monotonic (left) and non-monotonic (right) cycles oriented cycles P and Q of a graph G and a fixed embedding, we will let ℓ(P ∪ Q) mean the linking number of the image of the two cycles under the embedding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' We introduce a special class of links in book embeddings of a graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Definition 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Let K2n be a complete graph with vertices enumerated as {v1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , v2n} in cyclic order along the spine of a book embedding of K2n.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' An oriented cycle with consecutive edges {−−−→ vi1vi2, −−−→ vi2vi3, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , −−−−−→ vik−1vik, −−−→ vikvi1} is (1) strictly increasing if there is a cyclic permutation i′ 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , i′ k of i1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , ik such that i′ j < i′ j+1 for all j ∈ {1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', k − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' (2) strictly decreasing if there is a cyclic permutation i′ 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , i′ k of i1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , ik such that i′ j > i′ j+1 for all j ∈ {1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', k − 1}.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' (3) monotonic if the cycle is either strictly increasing or strictly decreasing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The 4-cycle on the left in Figure 2 is monotonic because beginning with the vertex v1, the vertices in the cycle in order are v1, v2, v3, v4, which has strictly increasing indices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' However, the order of the vertices in the 4-cycle on the right is v1, v3, v2, v4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The indices are not monotonic even up to cyclic permutation, so this cycle is not monotonic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Finally, we also introduce the Eulerian numbers, which arise in combinatorics as coefficients of Eulerian polynomials [4, 10, 16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Let σ ∈ Sn be a permutation on {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' , n}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' An ascent of the per- mutation is a value 1 ≤ k ≤ n − 1 such that σ(k) < σ(k + 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Definition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The Eulerian number A(n, m) is the number of permutations σ ∈ Sn that have exactly m ascents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' As an example, we have the following exhaustive list of permutations in S3: (1,2,3);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' (1,3,2);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' (2,1,3);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' (2,3,1);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' (3,1,2);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' (3,2,1).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' 4 AGUILLON, BURKHOLDER, CHENG, EDDINS, HARRELL, KOZAI, LEAKE, AND MORALES Among these permutations, (1,2,3) has two ascents, (1,3,2), (2,1,3), (2,3,1), and (3,1,2) each have one ascent, and (3,2,1) has no ascents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Hence, A(3, 2) = 1, A(3, 1) = 4, and A(3, 0) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Note that A(n, n) = 0 for all n > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Additionally, there is always exactly one permutation in Sn with no ascents and exactly one permutation in Sn with n − 1 descents, which are (n,n − 1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' ,1) and (1,2,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' ,n), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Hence, A(n, 0) = A(n, n − 1) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Eulerian numbers are coefficients of Eulerian polynomials, An(t) = n � m=0 A(n, m)tm, where An(t) is recursively defined by the relations, A0(t) = 1, An(t) = t(1 − t)A′ n−1(t) + An−1(t)(1 + (n − 1)t), for n > 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' It is also known that A(n, m) = m+1 � k=0 (−1)k �n + 1 k � (m + 1 − k)n, and the exponential generating function for the Eulerian numbers is ∞ � n=0 ∞ � m=0 A(n, m)tm xn n!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' = t − 1 t − e(t−1)x .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' From the definition, it is also evident that for a fixed n, the sum of Eulerian numbers A(n, m) over all possible values of m gives the number of all permutations, |Sn|, so that n � m=0 A(n, m) = n!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='. 4.' 
4. Linking numbers of disjoint monotonic cycles

In this paper, we will consider the distribution of linking numbers of two disjoint monotonic cycles in random book embeddings. First, note the following fact about the number of interior edges of two monotonic cycles in a book embedding.

Lemma 4. Two disjoint monotonic cycles of length m and n in a book embedding of K_{m+n} must have an equal number of interior edges, which is also equal to half the number of crossings between the two cycles.

Proof. Let P and Q be an m-cycle and n-cycle in a book embedding, respectively, and suppose that P has i interior edges. Let \overrightarrow{v_j v_k} be an interior edge of P. Then v_{k-1} must be a vertex in Q, and there is a smallest h > k such that v_h is a vertex in Q. Then \overrightarrow{v_{k-1} v_h} is an edge in Q which crosses the edge \overrightarrow{v_j v_k} of P. Similarly, there is an edge \overrightarrow{v_s v_{j+1}} in Q that crosses \overrightarrow{v_j v_k}, and no other edge in Q can cross \overrightarrow{v_j v_k}. Hence, the number of crossings between P and Q is twice the number of interior edges in P. By symmetry, this is also equal to twice the number of interior edges in Q. □

Lemma 4 implies that if P and Q are both n-cycles and P consists of n interior edges, then all edges in Q must also be interior. We now relate the number of disjoint cycles with fixed linking number to the Eulerian numbers A(m, n).
Theorem 5. Suppose P and Q are both strictly increasing n-cycles in K_{2n}, so that P and Q both consist of n interior edges. The proportion of random book embeddings of K_{2n} for which P and Q have linking number equal to ℓ is
\[ \frac{A(2n - 1, n + \ell - 1)}{(2n - 1)!}. \]

Proof. Let P and Q be two strictly increasing cycles, each with n interior edges. Consider a permutation of all of the interior edges of K_{2n}, which determines the ordering of their respective sheets in a book embedding. As we are only concerned with the linking number ℓ(P ∪ Q), we only need the relative orderings of the edges of P and Q in order to resolve the signs of any crossings between interior edges of P and Q. By designating these edges as e_1, . . . , e_{2n}, we may consider the permutation σ as a permutation of {1, . . . , 2n}. Without loss of generality, we label the topmost edge of the permutation of interior edges as edge e_{2n}. Since the edges in the cycle are directed so that the cycle is strictly increasing, we may begin numbering the vertices of K_{2n} so that the initial vertex of e_{2n} is vertex v_{2n}. We then number the vertices in cyclic order, so that the vertex in K_{2n} that lies next in the clockwise direction from v_{2n} is v_1, the following vertex (which is the terminal vertex of e_{2n}) is v_2, and so on.
The edge indices will then also be identified with their initial vertex, so that the edge \overrightarrow{v_1 v_3} is e_1, the edge \overrightarrow{v_2 v_4} is e_2, and so on, until the edge \overrightarrow{v_{2n-1} v_1} is labeled e_{2n-1} and the edge \overrightarrow{v_{2n} v_2} is labeled e_{2n}. Under this labeling scheme, edge e_j will have crossings with edges e_{j-1} and e_{j+1}, where indices are taken modulo 2n. The bijective function σ from {1, . . . , 2n} to itself determines the relative heights of the edges, so that whenever σ(j) > σ(k), the edge e_j is in a sheet above the sheet containing e_k, and whenever σ(j) < σ(k), e_j is embedded in a sheet below the sheet containing e_k. Since both cycles are strictly increasing, the sign of the crossing between edge e_j and edge e_{j+1} can be determined by σ(j) and σ(j + 1). When σ(j) > σ(j + 1), the sign of the crossing is negative. When σ(j) < σ(j + 1), the sign of the crossing is positive, as seen in Figure 3. Therefore, the linking number is half the quantity of the number of times σ(j) < σ(j + 1) minus the number of times σ(j) > σ(j + 1). By construction, σ(2n) = 2n, so that σ(2n − 1) < σ(2n) and σ(2n) > σ(1). Since this results in exactly one positive crossing and one negative crossing, crossings involving the edge e_{2n} have zero net effect on the linking number. We may ignore edge e_{2n} in the permutation and consider only a further restriction of the permutation to a permutation σ′ of {1, . . . , 2n − 1}.
Topologically, this can be thought of as applying a Reidemeister Move 2, sliding the topmost edge away to the exterior of the binding so that the edge e_{2n} no longer has any crossings with edges e_{2n-1} and e_1. Notice that σ′(j) < σ′(j + 1) is the same as an ascent in σ′, and σ′(j) > σ′(j + 1) is the same as a descent in σ′. So the linking number of P and Q depends on the number of ascents of the permutation σ′. If σ′ has m ascents, it has 2n − 2 − m descents, so that the linking number is (1/2)[m − (2n − 2 − m)]. Setting this equal to ℓ, then m = n + ℓ − 1. Thus, we conclude that the number of permutations in S_{2n-1} that lead to a linking number of ℓ is A(2n − 1, n + ℓ − 1). For each permutation σ′ ∈ S_{2n-1}, there are an equal number of permutations of the edges of K_{2n} that restrict to σ′, so that the proportion of random book embeddings in which P and Q have linking number ℓ is
\[ \frac{A(2n - 1, n + \ell - 1)}{(2n - 1)!}. \qquad \square \]

Figure 3. A negative crossing (left) and a positive crossing (right) in terms of σ(j) and σ(j + 1).

Figure 4. Solomon's link as a union of two monotonic 3-cycles in K_6.
An example of the connection between ascents, descents, crossing signs, and linking number is shown in Figure 4 and Table 1. Observe in Table 1 that σ(5) < σ(6). Thus j = 5 would be an ascent. However, as σ(6) > σ(1), the signed crossing between e_5 and e_6 is canceled out with the signed crossing between e_6 and e_1. Considering only j = 1, 2, 3, 4, we are left with four descents, which lead to four negative crossings and a linking number of −2.

  j   σ(j)   crossing of e_j and e_{j+1}   ascent or descent
  1    5                 −                      descent
  2    4                 −                      descent
  3    3                 −                      descent
  4    2                 −                      descent
  5    1                 +                      ascent
  6    6                 −

Table 1. Signed crossings and ascents/descents in height function σ for the example in Figure 4.
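As a quick numerical sanity check of Theorem 5 (ours, not part of the paper), one can sample the restricted height permutation σ′ uniformly from S_{2n−1}, read off the linking number as half the difference between ascents and descents, and compare the empirical frequencies with A(2n − 1, n + ℓ − 1)/(2n − 1)!. The sketch below repeats the eulerian helper from the earlier snippet so that it is self-contained; the function name sampled_linking_number is ours.

import random
from math import comb, factorial

def eulerian(n, m):
    # Explicit formula for the Eulerian number A(n, m).
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

def sampled_linking_number(n, rng):
    # Sample sigma' uniformly from S_{2n-1}; the linking number in the proof of
    # Theorem 5 is (ascents - descents) / 2 = ascents - (n - 1).
    sigma = list(range(1, 2 * n))
    rng.shuffle(sigma)
    asc = sum(1 for a, b in zip(sigma, sigma[1:]) if a < b)
    return asc - (n - 1)

n, trials = 3, 200_000
rng = random.Random(0)
counts = {}
for _ in range(trials):
    ell = sampled_linking_number(n, rng)
    counts[ell] = counts.get(ell, 0) + 1

for ell in range(-(n - 1), n):
    predicted = eulerian(2 * n - 1, n + ell - 1) / factorial(2 * n - 1)
    print(ell, counts.get(ell, 0) / trials, round(predicted, 4))
# For n = 3 the predicted proportions are 1/120, 26/120, 66/120, 26/120, 1/120.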
We remark that the results from Theorem 5 extend to the more general case of two monotonic cycles of length m and n with i interior edges each. The sign of the linking number will flip whenever we reverse the orientation of one of the cycles, so if we have two monotonic cycles P and Q of length n which are not necessarily strictly increasing, this would result in replacing ℓ with −ℓ in the result of Theorem 5. However, the Eulerian numbers have the symmetry property that A(n, m) = A(n, n − 1 − m), so that A(2n − 1, n − ℓ − 1) = A(2n − 1, n + ℓ − 1). This results in an identical proportion of book embeddings in which the cycles have linking number ℓ; thus whether the cycles are strictly increasing or strictly decreasing has no net effect on the distribution of linking numbers as long as they are both monotonic.

In the case where P and Q have lengths m and n, respectively, Lemma 4 states that both P and Q have the same number of interior edges, which we will denote by i. Contracting K_{m+n} along all of the exterior edges in P and Q does not alter the topological type of the link P ∪ Q, and the proportion of random book embeddings of K_{m+n} for which the linking number of P ∪ Q is equal to ℓ will be the same as the proportion of book embeddings of the contracted graph K′ in which the linking number of P ∪ Q is equal to ℓ, by a similar argument as in Theorem 5. Hence, we arrive at the following when i ≥ 3.

Corollary 6. Let P and Q be monotonic cycles of length m and n, respectively, in K_{m+n}. The proportion of random book embeddings of K_{m+n} in which the linking number of P ∪ Q is equal to ℓ is
\[ \frac{A(2i - 1, i + \ell - 1)}{(2i - 1)!}, \]
where i ≥ 2 is the number of interior edges of both P and Q.

The exceptional case when i = 2 can be verified to follow the same formula as in Corollary 6 by contracting to two 3-cycles with two interior edges and one exterior edge each, then applying the argument in Theorem 5 to the interior edges only. Table 2 gives the values of A(2i − 1, i + ℓ − 1) for 1 ≤ i ≤ 5. The proportion of random book embeddings for which two cycles with i interior edges have a linking number of ℓ can be obtained by dividing the entries by (2i − 1)!.

  i \ ℓ   −5    −4     −3      −2      −1       0       1      2      3     4    5
    1                                            1
    2                                    1       4       1
    3                            1      26      66      26      1
    4                    1     120    1191    2416    1191    120      1
    5              1   502   14608   88234  156190   88234  14608    502     1

Table 2. Values of A(2i − 1, i + ℓ − 1).
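Table 2 and the distribution in Corollary 6 are straightforward to regenerate from the Eulerian numbers; the short sketch below is ours (not from the paper) and prints each row of Table 2 together with the corresponding proportions A(2i − 1, i + ℓ − 1)/(2i − 1)!.

from fractions import Fraction
from math import comb, factorial

def eulerian(n, m):
    # Explicit formula for the Eulerian number A(n, m).
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

for i in range(1, 6):
    row = [eulerian(2 * i - 1, i + ell - 1) for ell in range(-(i - 1), i)]
    probs = [Fraction(a, factorial(2 * i - 1)) for a in row]
    print(f"i = {i}: counts {row}")
    print(f"        proportions {[str(p) for p in probs]}")
# i = 2 gives counts [1, 4, 1] and proportions 1/6, 2/3, 1/6; i = 3 gives
# 1/120, 13/60, 11/20, 13/60, 1/120, and summing the ell = +/-2 entries gives the
# 1/60 used for the Solomon's link in Section 5.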
The following theorem describes the number of disjoint m- and n-cycles with a given number of interior edges. In combination with the previous corollary, this will allow for calculation of the frequency with which a random m-cycle P and disjoint n-cycle Q has linking number ℓ in a random book embedding of K_{m+n}.

Theorem 7. Let m, n ≥ 3. Then the number of disjoint (undirected) monotonic cycles P and Q in a book embedding of K_{m+n} so that P is an m-cycle and Q is an n-cycle, each with 2 ≤ i ≤ min{m, n} interior edges, is
\[ \binom{m}{m - i}\binom{n - 1}{n - i} + \binom{n}{n - i}\binom{m - 1}{m - i}, \qquad \text{if } m \neq n. \]
In the case that m = n, the number of disjoint cycles is
\[ \binom{n}{n - i}\binom{n - 1}{n - i}. \]

Proof. Fix a labeling of the vertices of K_{m+n} in cyclic order v_1, . . . , v_{m+n}. Suppose P is an m-cycle and Q is an n-cycle. First, suppose P contains v_1. If P has i interior edges, there are $\binom{m}{i}$ ways to choose which of the m edges in P are interior edges. For each of the i chosen edges in P, in order for it to be interior, there must be a vertex in the cycle Q lying between the initial and terminal vertices of the edge in P. Moreover, for each of the external edges in the cycle P, there cannot be any vertices of Q lying between the initial and terminal vertices. This creates i areas in which the vertices of Q must be located, one between the initial and terminal vertices of each internal edge in P, with each containing at least one vertex. A stars and bars argument, in which there are n − i vertices of Q to allocate after placing one vertex of Q into each of the i spots, and i − 1 bars to separate the i spots, leads to $\binom{n-1}{n-i}$ ways of choosing the vertices of Q. This results in $\binom{m}{m-i}\binom{n-1}{n-i}$ choices of P and Q so that P contains v_1 and both cycles have i interior edges. By an analogous argument, there are $\binom{n}{n-i}\binom{m-1}{m-i}$ ways to choose P and Q so that Q contains v_1, completing the proof when m ≠ n. If m = n, there is no distinction between the cases when v_1 is in P and v_1 is in Q. □

The number of disjoint n-cycles in K_{2n} with i interior edges is tabulated in Table 3 for 3 ≤ n ≤ 10. The values $\binom{n}{n-i}\binom{n-1}{n-i}$ appear as OEIS sequence A103371 [17], up to a shift in indices due to the cyclic symmetry in the circular diagrams of book embeddings.
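The counts in Theorem 7, and hence Table 3, can be regenerated directly from the binomial formula. The sketch below is ours; the helper name pairs_with_i_interior_edges is hypothetical, and the row-sum check uses the identity discussed next.

from math import comb

def pairs_with_i_interior_edges(m, n, i):
    # Number of disjoint monotonic m- and n-cycles in K_{m+n} with i interior edges each
    # (Theorem 7).
    if m == n:
        return comb(n, n - i) * comb(n - 1, n - i)
    return comb(m, m - i) * comb(n - 1, n - i) + comb(n, n - i) * comb(m - 1, m - i)

# Reproduce Table 3: rows n = 3..10, columns i = 1..n.
for n in range(3, 11):
    row = [pairs_with_i_interior_edges(n, n, i) for i in range(1, n + 1)]
    assert sum(row) == comb(2 * n - 1, n - 1)   # total number of pairs of disjoint n-cycles
    print(n, row)
# Row n = 3 is [3, 6, 1], the counts used for K_6 in Section 5.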
The sum over all i gives the number of ways to choose two disjoint monotonic n-cycles in K_{2n}. An undirected monotonic cycle is determined by the vertices in the cycles, so this amounts to choosing two disjoint subsets of n vertices from the 2n vertices in K_{2n}. The number of ways in which this choice can be made is given by $\binom{2n-1}{n-1} = \binom{2n-1}{n}$.

  n \ i    1     2      3      4      5      6      7     8    9   10
    3      3     6      1
    4      4    18     12      1
    5      5    40     60     20      1
    6      6    75    200    150     30      1
    7      7   126    525    700    315     42      1
    8      8   196   1176   2450   1960    588     56     1
    9      9   288   2352   7056   8820   4704   1008    72    1
   10     10   405   4320  17640  31752  26460  10080  1620   90    1

Table 3. Number of pairs of monotonic n-cycles each with i interior edges in K_{2n}.

Combining Theorem 7 with Theorem 5 yields the following corollary.

Corollary 8. The proportion of links P ∪ Q with linking number ℓ among pairs of n-cycles P and Q in a random book embedding of K_{2n} is
\[ \frac{\displaystyle\sum_{i=1}^{n} \frac{A(2i - 1, \ell + i - 1)}{(2i - 1)!} \binom{n}{n - i}\binom{n - 1}{n - i}}{\displaystyle\binom{2n - 1}{n - 1}}. \]

The values from Corollary 8 for n = 3, 4, 5, and 6 are computed and illustrated in Figure 5.

Figure 5. Proportion of disjoint pairs of n-cycles with a given linking number in a random book embedding of K_{2n}.
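The expression in Corollary 8 can be evaluated exactly with rational arithmetic. The sketch below is ours; for n = 3 the ℓ = 0 entry is 151/200, and the ℓ = ±1 and ℓ = ±2 entries sum to 73/300 and 1/600 respectively, the values that reappear in Section 5.

from fractions import Fraction
from math import comb, factorial

def eulerian(n, m):
    # Explicit formula for A(n, m); returns 0 outside 0 <= m <= n - 1.
    if m < 0 or m > n - 1:
        return 0
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

def linking_number_proportion(n, ell):
    # Proportion of pairs of disjoint n-cycles in a random book embedding of K_{2n}
    # whose linking number equals ell (Corollary 8).
    total = sum(Fraction(eulerian(2 * i - 1, ell + i - 1), factorial(2 * i - 1))
                * comb(n, n - i) * comb(n - 1, n - i)
                for i in range(1, n + 1))
    return total / comb(2 * n - 1, n - 1)

for n in range(3, 7):
    dist = {ell: linking_number_proportion(n, ell) for ell in range(-n, n + 1)}
    assert sum(dist.values()) == 1
    print(n, {ell: str(p) for ell, p in dist.items() if p})
# n = 3 gives 1/1200, 73/600, 151/200, 73/600, 1/1200 for ell = -2..2.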
Notice that for two n-cycles in K_{2n}, the maximum number of crossings that can appear is 2n, meaning that an upper bound for the absolute value of the linking number is n. Thus, we can normalize the linking number of two monotonic cycles by dividing by n. The distributions of links with a given normalized linking number when n = 100, 200, 500, and 1000 are shown in Figure 6. As n increases, the proportion of links with linking number 0 decreases. However, this behavior is misleading, as links are distributed among a larger range of possible values for the linking number as n increases.

Figure 6. Proportion of links with specified normalized linking number for two monotonic n-cycles in a random book embedding of K_{2n}.

Figure 7. Density of links with specified normalized linking number for two monotonic n-cycles in a random book embedding of K_{2n}.
Normalizing the graph to a density plot as in Figure 7 gives a very different picture of the behavior of linking numbers of disjoint n-cycles in random book embeddings of K_{2n}. As the number of vertices increases, the normalized linking numbers tend closer to 0. This model behaves differently from other models where the mean squared linking number grows as θ(n²), as in [1, 2, 18]. In fact, using the exponential generating function for the Eulerian numbers, we can determine an explicit formula for the mean squared linking number in terms of the number of interior edges i. We will need the following fact from differential calculus.

Lemma 9. Let g(x) = x^n / (1 − x)^m. Then for k ≥ 1,
\[ g^{(k)}(0) = k! \binom{k - n + m - 1}{m - 1}. \]

Proof. For |x| < 1, we can express 1/(1 − x) as the power series
\[ \frac{1}{1 - x} = x^0 + x^1 + x^2 + x^3 + \cdots. \]
Then g(x) = x^n (x^0 + x^1 + x^2 + x^3 + \cdots)^m,
so that g^{(k)}(0)/k! is the coefficient of x^k in the power series expansion of g(x). This is the x^{k−n} coefficient of (x^0 + x^1 + x^2 + x^3 + \cdots)^m, which is the number of ways to choose m non-negative integers that add up to k − n. A stars and bars argument counts this as $\binom{k-n+m-1}{m-1}$, with this binomial coefficient defined to be 0 if k < n. □

We are now ready to show that the mean squared linking number of two disjoint cycles grows linearly in the number of interior edges i. Heuristically, this means that we expect the linking number to grow roughly as the square root of the number of interior edges.

Theorem 10. Let P ∪ Q be a union of disjoint n-cycles with i interior edges each. Then the mean squared linking number of P ∪ Q in a random book embedding is i/6.

Proof. The exponential generating function for the Eulerian numbers is
\[ \sum_{n=0}^{\infty} \sum_{m=0}^{\infty} A(n, m)\, t^m \frac{x^n}{n!} = \frac{t - 1}{t - e^{(t-1)x}}. \]
Multiplying both sides by t^{−i+1}, we arrive at
\[ \sum_{n=0}^{\infty} \sum_{m=0}^{\infty} A(n, m)\, t^{m-i+1} \frac{x^n}{n!} = \frac{t^{-i+1}(t - 1)}{t - e^{(t-1)x}}. \]
Notice that differentiating the left-hand side twice with respect to t and taking the limit as t → 1 yields
\[ \sum_{n=0}^{\infty} \sum_{m=0}^{\infty} \bigl[(m - i + 1)^2 - (m - i + 1)\bigr] A(n, m) \frac{x^n}{n!}. \]
Differentiating this expression 2i − 1 times with respect to x and evaluating at x = 0 results in
\[ \sum_{m=0}^{\infty} (m - i + 1)^2 A(2i - 1, m) - (m - i + 1) A(2i - 1, m). \]
After a substitution of ℓ = m − i + 1, this becomes
\[ \sum_{\ell=-i+1}^{i-1} A(2i - 1, i + \ell - 1)\,\ell^2 - A(2i - 1, i + \ell - 1)\,\ell = \sum_{\ell=-i+1}^{i-1} A(2i - 1, i + \ell - 1)\,\ell^2 = (2i - 1)!\, E[\ell(P \cup Q)^2], \]
as the symmetry in the Eulerian triangle means that the expected value of the linking number is 0. Hence, the second part of the summation vanishes.

We now repeat the differentiation on the exponential generating function to find an equivalent expression utilizing logarithmic differentiation. We set f(t, x) to be the exponential generating function,
\[ f(t, x) = \frac{t^{-i+1}(t - 1)}{t - e^{(t-1)x}}, \]
and first compute using L'Hôpital's rule,
\[ \lim_{t \to 1} f(t, x) = 1 \cdot \lim_{t \to 1} \frac{t - 1}{t - e^{(t-1)x}} = \lim_{t \to 1} \frac{1}{1 - x e^{(t-1)x}} = \frac{1}{1 - x}. \]
Using logarithmic differentiation, we find that
\[ \frac{f_t(t, x)}{f(t, x)} = \frac{-i + 1}{t} + \frac{1}{t - 1} - \frac{1 - x e^{(t-1)x}}{t - e^{(t-1)x}} = \frac{-i + 1}{t} + \frac{(t - e^{(t-1)x}) - (t - 1)(1 - x e^{(t-1)x})}{(t - 1)(t - e^{(t-1)x})} = \frac{-i + 1}{t} + \frac{1 - e^{(t-1)x} + (t - 1) x e^{(t-1)x}}{(t - 1)(t - e^{(t-1)x})}. \]
Taking the limit as t → 1 using L'Hôpital's rule twice, we obtain
\[ \lim_{t \to 1} \frac{f_t(t, x)}{f(t, x)} = (-i + 1) + \lim_{t \to 1} \frac{(t - 1) x^2 e^{(t-1)x}}{(t - e^{(t-1)x}) + (t - 1)(1 - x e^{(t-1)x})} = (-i + 1) + \lim_{t \to 1} \frac{x^2 e^{(t-1)x} + (t - 1) x^3 e^{(t-1)x}}{1 - x e^{(t-1)x} + 1 - x e^{(t-1)x} + (t - 1)(-x^2 e^{(t-1)x})} = (-i + 1) + \frac{x^2}{2} \cdot \frac{1}{1 - x}. \]
The second derivative of log f(t, x) is
\[ \frac{f_{tt}(t, x)}{f(t, x)} - \left(\frac{f_t(t, x)}{f(t, x)}\right)^2 = -\frac{-i + 1}{t^2} - \frac{1}{(t - 1)^2} + \frac{x^2 e^{(t-1)x}}{t - e^{(t-1)x}} + \frac{(1 - x e^{(t-1)x})^2}{(t - e^{(t-1)x})^2} = -\frac{-i + 1}{t^2} + \frac{-(t - e^{(t-1)x})^2 + (t - 1)^2\bigl[(t - e^{(t-1)x}) x^2 e^{(t-1)x} + (1 - x e^{(t-1)x})^2\bigr]}{(t - 1)^2 (t - e^{(t-1)x})^2}. \]
Taking the limit as t → 1 using L'Hôpital's rule four times yields
\[ \lim_{t \to 1} \left[ \frac{f_{tt}(t, x)}{f(t, x)} - \left(\frac{f_t(t, x)}{f(t, x)}\right)^2 \right] = -(-i + 1) + \frac{x^3}{3} \cdot \frac{1}{(1 - x)^2} - \frac{x^4}{12} \cdot \frac{1}{(1 - x)^2}. \]
We can then find
\[ \lim_{t \to 1} f_{tt}(t, x) = \lim_{t \to 1} f(t, x)\left[ \frac{f_{tt}(t, x)}{f(t, x)} - \left(\frac{f_t(t, x)}{f(t, x)}\right)^2 + \left(\frac{f_t(t, x)}{f(t, x)}\right)^2 \right] = \frac{i(i - 1)}{1 - x} + \frac{(-i + 1) x^2}{(1 - x)^2} + \left(\frac{x^3}{3} + \frac{x^4}{6}\right) \frac{1}{(1 - x)^3}. \]
By Lemma 9, the (2i − 1)-th derivative in x evaluated at x = 0 is
\[ (2i - 1)! \left[ i(i - 1) + (-i + 1)(2i - 2) + \frac{1}{3}\binom{2i - 2}{2} + \frac{1}{6}\binom{2i - 3}{2} \right] = (2i - 1)! \left[ (i - 1)(-i + 2) + \frac{(2i - 2)(2i - 3)}{6} + \frac{(2i - 3)(2i - 4)}{12} \right] = (2i - 1)!\, \frac{i}{6}. \]
Hence, (2i − 1)! E[ℓ(P ∪ Q)²] = (2i − 1)! · i/6, completing the proof of the theorem. □

Using Theorem 10, we can find the asymptotic behavior of the mean squared linking number over all pairs of disjoint n-cycles in K_{2n}. Recall that a function f(n) is in order θ(n) if there are positive constants a, A, and N such that an ≤ f(n) ≤ An for all n > N.

Theorem 11. Let n ≥ 3. Then the mean squared linking number of two cycles P and Q, taken over all pairs of disjoint n-cycles across all random book embeddings of K_{2n}, is in order θ(n).

Proof. By combining Theorem 7 and Theorem 10 and summing over the number of interior edges, the mean squared linking number is
\[ \frac{1}{\binom{2n-1}{n-1}} \sum_{i=2}^{n} \binom{n}{n - i}\binom{n - 1}{n - i}\, \frac{i}{6}. \]
Since
\[ i \binom{n}{n - i} = i \binom{n}{i} = n \binom{n - 1}{i - 1}, \]
this becomes
\[ \frac{1}{\binom{2n-1}{n-1}} \sum_{i=2}^{n} \frac{n}{6} \binom{n - 1}{i - 1}^2 = \frac{n}{6} \cdot \frac{1}{\binom{2n-1}{n-1}} \sum_{i=2}^{n} \binom{n - 1}{i - 1}^2. \tag{1} \]
Using Vandermonde's identity, the summation part of the right-hand side becomes
\[ \sum_{i=2}^{n} \binom{n - 1}{i - 1}^2 = \left[\sum_{i=0}^{n-1} \binom{n - 1}{i}^2\right] - \binom{n - 1}{0}^2 = \binom{2n - 2}{n - 1} - 1. \]
Thus, Equation (1) yields
\[ \frac{n}{6} \cdot \frac{1}{\binom{2n-1}{n-1}} \left[\binom{2n - 2}{n - 1} - 1\right] = \frac{n}{6} \left[ \frac{n}{2n - 1} - \frac{1}{\binom{2n-1}{n-1}} \right]. \]
For an upper bound, we have
\[ \frac{n}{6} \left[ \frac{n}{2n - 1} - \frac{1}{\binom{2n-1}{n-1}} \right] \leq \frac{n}{6} \cdot \frac{n}{2n - 1} \leq \frac{n}{6}. \]
For a lower bound, we note that if n ≥ 3,
\[ \binom{2n - 1}{n - 1} = \frac{2n - 1}{1} \cdot \frac{2n - 2}{2} \cdots \frac{n + 1}{n - 1} \cdot \frac{n}{n} \geq (2n - 1)(n - 1) \geq 2(2n - 1). \]
Hence,
\[ \frac{n}{6} \left[ \frac{n}{2n - 1} - \frac{1}{\binom{2n-1}{n-1}} \right] \geq \frac{n}{6} \left[ \frac{n}{2n - 1} - \frac{1}{2(2n - 1)} \right] = \frac{n}{6} \cdot \frac{n - \frac{1}{2}}{2n - 1} = \frac{n}{6} \cdot \frac{1}{2} = \frac{n}{12}. \qquad \square \]

Sample calculations of the mean squared linking number of two n-cycles in K_{2n} can be seen to asymptotically approach n/12, as seen from the nearly linear relationship between n and the mean squared linking number in Figure 8. When n = 100 and n = 1000, the approximate value of the mean squared linking number can be computed from the summation formula in Theorem 11 to be ≈ 8.37521 and ≈ 83.375, respectively.

Figure 8. Mean squared linking number of two disjoint n-cycles in a random book embedding of K_{2n}.
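The exact mean squared linking number is easy to evaluate from the summation formula in the proof of Theorem 11, i.e., Theorem 10 weighted by the counts in Theorem 7. The following sketch is ours, not part of the paper; it checks the i/6 value of Theorem 10 against the Eulerian-number definition of E[ℓ²] and reproduces the approximate values 8.37521 and 83.375 quoted above alongside the n/12 comparison.

from fractions import Fraction
from math import comb, factorial

def eulerian(n, m):
    # Explicit formula for the Eulerian number A(n, m).
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

def mean_sq_given_i(i):
    # E[l^2] for a pair of monotonic cycles with i interior edges; Theorem 10 says i/6.
    return sum(Fraction(eulerian(2 * i - 1, i + ell - 1), factorial(2 * i - 1)) * ell ** 2
               for ell in range(-(i - 1), i))

assert all(mean_sq_given_i(i) == Fraction(i, 6) for i in range(2, 8))

def mean_sq_linking_number(n):
    # Mean squared linking number over all pairs of disjoint n-cycles in K_{2n} (Theorem 11).
    total = sum(comb(n, n - i) * comb(n - 1, n - i) * Fraction(i, 6) for i in range(2, n + 1))
    return total / comb(2 * n - 1, n - 1)

for n in (100, 1000):
    print(n, float(mean_sq_linking_number(n)), n / 12)
# Prints approximately 8.37521 vs 8.333... for n = 100, and 83.375 vs 83.333... for n = 1000.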
5. Links in random book embeddings of K_6

In this section, we consider the special case of random book embeddings of K_6. Rowland has studied all possible topological types of book embeddings of K_6, showing that the only non-trivial knots and links that appear are the trefoil knot, the figure-eight knot, the Hopf link, and the Solomon's link [21]. Any two-component link in K_6 must consist of two disjoint 3-cycles, and every 3-cycle is necessarily monotonic. Moreover, the trivial link has linking number 0, the Hopf link has linking number ±1, and the Solomon's link (shown in Figure 4) has linking number ±2. Hence, we can utilize Theorems 5 and 7 in the case that n = 3 to determine the probabilities of each type of link occurring in a random book embedding. We separately consider the cases when the number of interior edges in the 3-cycles is i = 1, 2, and 3, as in Figure 9, and determine the probability of each type of link occurring in each case. We can then combine with the counts in Table 3 to compute the overall probability that a randomly selected two-component link is either trivial, a Hopf link, or a Solomon's link.

Figure 9. Projections of two 3-cycles in K_6 with i = 1 (left), i = 2 (middle), and i = 3 (right) interior edges.
When i = 1, it is evident that since the projection of the two cycles has no crossings, the two-component link is trivial. When i = 2, Table 2 implies that the probability that the two cycles are the Hopf link is p_2 = 1/3, and the probability that the two cycles are the trivial link is 1 − p_2 = 2/3. When i = 3, Table 2 implies that the probability that the two cycles form the Solomon's link is q_3 = 1/60, the probability that the two cycles form the Hopf link is p_3 = 13/30, and the probability that the two cycles form the trivial link is 1 − p_3 − q_3 = 11/20.

Table 3 details the frequency with which the 10 pairs of disjoint 3-cycles in K_6 have 1, 2, or 3 interior edges. From this, we determine that the probability that a randomly chosen pair of disjoint 3-cycles in a random book embedding of K_6 is trivial is
\[ \frac{1}{10}\left( 3 \cdot 1 + 6 \cdot \frac{2}{3} + 1 \cdot \frac{11}{20} \right) = \frac{151}{200}. \]
Similarly, the probability that a randomly chosen pair of disjoint 3-cycles in a random book embedding of K_6 is the Hopf link is
\[ \frac{1}{10}\left( 3 \cdot 0 + 6 \cdot \frac{1}{3} + 1 \cdot \frac{13}{30} \right) = \frac{73}{300}. \]
Finally, the probability that a randomly chosen pair of disjoint 3-cycles in a random book embedding of K_6 is the Solomon's link is
\[ \frac{1}{10}\left( 3 \cdot 0 + 6 \cdot 0 + 1 \cdot \frac{1}{60} \right) = \frac{1}{600}. \]
Since K_6 contains 10 distinct disjoint pairs of 3-cycles, this implies that in a random book embedding of K_6, the expected number of trivial links is 151/20, the expected number of Hopf links is 73/30, and the expected number of Solomon's links is 1/60. It is a classical result in spatial graph theory that every embedding of K_6 contains at least one non-trivial link [5]. In a random book embedding of K_6, the expected number of non-trivial links is 49/20, with nearly all of the non-trivial links represented by Hopf links.
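The arithmetic in this section is small enough to verify exactly. The sketch below is ours; it recomputes the conditional probabilities from Table 2 and combines them with the Table 3 counts for n = 3, and the helper name prob_abs_linking is hypothetical.

from fractions import Fraction
from math import comb, factorial

def eulerian(n, m):
    # Explicit formula for the Eulerian number A(n, m).
    return sum((-1) ** k * comb(n + 1, k) * (m + 1 - k) ** n for k in range(m + 2))

def prob_abs_linking(i, abs_ell):
    # P(|linking number| = abs_ell) for two 3-cycles with i interior edges (Corollary 6).
    mult = 1 if abs_ell == 0 else 2
    return Fraction(mult * eulerian(2 * i - 1, i + abs_ell - 1), factorial(2 * i - 1))

counts = {1: 3, 2: 6, 3: 1}         # Table 3, row n = 3: pairs of 3-cycles by interior edges
total_pairs = sum(counts.values())   # 10 pairs of disjoint 3-cycles in K_6

trivial = sum(c * prob_abs_linking(i, 0) for i, c in counts.items()) / total_pairs
hopf    = sum(c * prob_abs_linking(i, 1) for i, c in counts.items()) / total_pairs
solomon = sum(c * prob_abs_linking(i, 2) for i, c in counts.items()) / total_pairs

print(trivial, hopf, solomon)                   # 151/200, 73/300, 1/600
print(10 * trivial, 10 * hopf, 10 * solomon)    # expected counts 151/20, 73/30, 1/60
print(10 * (hopf + solomon))                    # expected non-trivial links: 49/20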
6. Acknowledgments

The authors would like to thank the National Science Foundation for supporting this work. This research was partially supported by National Science Foundation Grant DMS-1852132. In addition, the authors would like to thank the Department of Mathematics at Rose-Hulman Institute of Technology for their hospitality and for hosting the Rose-Hulman Institute of Technology Mathematics Research Experience for Undergraduates, where most of this work was completed.

References

[1] J. Arsuaga, B. Borgo, Y. Diao, and R. Scharein. The growth of the mean average crossing number of equilateral polygons in confinement. J. Phys. A, 42(46):465202, 9, 2009.
[2] Javier Arsuaga, T. Blackstone, Yuanan Diao, E. Karadayi, and M. Saito. Linking of uniform random polygons in confined spaces. Journal of Physics A: Mathematical and Theoretical, 40:1925, 02 2007.
[3] Gail Atneosen. One-dimensional n-leaved continua. Fundamenta Mathematicae, 74:43–45, 1972.
[4] Louis Comtet. Advanced Combinatorics: The Art of Finite and Infinite Expansions, chapter 6. D. Reidel Publishing Company, Boston, USA, revised and enlarged edition, 1974.
[5] J. H. Conway and C. McA. Gordon. Knots and links in spatial graphs. J. Graph Theory, 7(4):445–453, 1983.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Graph Theory, 7(4):445–453, 1983.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [6] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Diao, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Ernst, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Saarinen, and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Ziegler.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Generating random walks and polygons with stiffness in confinement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' A, 48(9):095202, 19, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [7] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Diao, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Pippenger, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Sumners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' On random knots.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In Random knotting and linking (Vancouver, BC, 1993), volume 7 of Ser.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Knots Everything, pages 187–197.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' World Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Publ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', River Edge, NJ, 1994.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [8] Toshiki Endo and Takashi Otsuki.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Notes on spatial representations of graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Hokkaido Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', 23(3):383–398, 1994.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [9] Toshiki Endo and Takashi Otsuki.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Knots and links in certain spatial complete graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Comb.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Ser.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' B, 68:23–35, 1996.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [10] Leonhard Euler.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Foundations of Differential Calculus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Springer Science and Business Media, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [11] Chaim Even-Zohar, Joel Hass, Nati Linial, and Tahl Nowik.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Invariants of random knots and links.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Discrete & Computational Geometry, 56(2):274–314, Jun 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [12] Erica Flapan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Knots, Molecules and the Universe: An Introduction to Topology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' American Mathematical Society, United States of America, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [13] Erica Flapan and Kenji Kozai.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Linking number and writhe in random linear embeddings of graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Chem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', 54(5):1117–1133, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [14] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Harrell M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Beals, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Gross.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Dna and knot theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' TIEM, 1999, Online.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [15] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Mishra and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Bhushan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Knot theory in understanding proteins.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Biol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', 65:1187– 1213, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [16] OEIS Foundation Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' A008292 - OEIS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' https://oeis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='org/A008292.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Accessed: 2022-10-18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [17] OEIS Foundation Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' A103371 - OEIS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' https://oeis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='org/A103371.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Accessed: 2022-10-18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [18] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Panagiotou, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Millett, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Lambropoulou.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' The linking number and the writhe of uniform random walks and polygons in confined spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' A, 43(4):045208, 28, 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [19] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Persinger.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Subsets of n-books in E3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Pac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', 18(1):169–173, 1966.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [20] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Portillo, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Diao, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Scharein, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Arsuaga, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Vazquez.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' On the mean and variance of the writhe of random polygons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' A, 44(27):275004, 19, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [21] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Rowland.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Classification of book representations of K6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Journal of Knot Theory and Its Ramifications, 26(12):1–26, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' [22] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Tsurusaki and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Deguchi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Numerical analysis on topological entanglements of random polygons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' In Statistical models, Yang-Baxter equation and related topics, and Symmetry, statistical mechanical models and applications (Tianjin, 1995), pages 320–329.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' World Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Publ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=', River Edge, NJ, 1996.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' LINKING NUMBER OF MONOTONIC CYCLES IN RANDOM BOOK EMBEDDINGS 17 Department of Mathematics, University of Notre Dame, 255 Hurley Bldg, Notre Dame, IN 46556 USA Email address: yaguillo@nd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='edu Department of Mathematics, University of Kentucky, 719 Patterson Office Tower, Lexington, KY 40506 USA Email address: ebu241@uky.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='edu Department of Mathematics, University of North Carolina at Chapel Hill, 120 E Cameron Avenue, Chapel Hill, NC 27599 USA Email address: xcheng1@unc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='edu University of Kentucky, Lexington, KY 40506 Email address: spencer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='eddins@uky.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='edu Mount Holyoke College, 50 College Street, South Hadley, MA 01075 USA Email address: harre22e@mtholyoke.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='edu Department of Natural Sciences and Mathematics, Lesley University, 29 Everett Street, Cambridge, MA 02138 USA Email address: kkozai@lesley.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='edu DePaul University, 1 E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' Jackson Boulevard, Chicago, IL 60604 USA Email address: ejohnj247@gmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='com Department of Mathematics, Purdue University, 150 N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content=' University Street, West Lafayette, IN 47907 USA Email address: moralep@purdue.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} +page_content='edu' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/kNA0T4oBgHgl3EQfI_8Y/content/2301.02082v1.pdf'} diff --git a/ldAyT4oBgHgl3EQfk_iC/content/2301.00444v1.pdf b/ldAyT4oBgHgl3EQfk_iC/content/2301.00444v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e5ef95a24a3d19976c45b423a46122065ec9f93c --- /dev/null +++ b/ldAyT4oBgHgl3EQfk_iC/content/2301.00444v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cef1b856955189538ca7666a5c1f6f7f57ade12ce1227d5af27deb70b6cb1284 +size 517057 diff --git a/ldAyT4oBgHgl3EQfk_iC/vector_store/index.pkl b/ldAyT4oBgHgl3EQfk_iC/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d2dabcab8f04c8cf1142a29fec5bcb53fca18394 --- /dev/null +++ b/ldAyT4oBgHgl3EQfk_iC/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a8d12fbfd223c818274d996b9bb50105a8283de2139627be580ad1bce138f16 +size 128636 diff --git a/m9E_T4oBgHgl3EQf7Ry5/content/tmp_files/2301.08369v1.pdf.txt b/m9E_T4oBgHgl3EQf7Ry5/content/tmp_files/2301.08369v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..448b8580ee660971bbfcfcd75b07536745fd0b57 --- /dev/null +++ b/m9E_T4oBgHgl3EQf7Ry5/content/tmp_files/2301.08369v1.pdf.txt @@ -0,0 +1,4866 @@ +arXiv:2301.08369v1 [math.SP] 20 Jan 2023 +Eigenvectors of graph Laplacians: a landscape +Jean-Guy CAPUTO and Arnaud KNIPPEL +January 23, 2023 +Laboratoire de Math´ematiques, INSA de Rouen Normandie, +Normandie Universit´e +76801 Saint-Etienne du Rouvray, France +E-mail: caputo@insa-rouen.fr, arnaud.knippel@insa-rouen.fr +Abstract +We review the properties of eigenvectors for the graph Laplacian ma- +trix, aiming at predicting a specific eigenvalue/vector from the geometry +of the graph. After considering classical graphs for which the spectrum +is known, we focus on eigenvectors that have zero components and ex- +tend the pioneering results of Merris (1998) on graph transformations +that preserve a given eigenvalue λ or shift it in a simple way. +These +transformations enable us to obtain eigenvalues/vectors combinatorially +instead of numerically; in particular we show that graphs having eigen- +values λ = 1, 2, . . . , 6 up to six vertices can be obtained from a short list +of graphs. For the converse problem of a λ subgraph G of a λ graph G”, +we prove results and conjecture that G and G” are connected by two of +the simple transformations described above. +1 +Introduction +The graph Laplacian is an important operator for both theoretical reasons and +applications [1]. As its continuous counterpart, it arises naturally from conser- +vation laws and has many applications in physics and engineering. The graph +Laplacian has real eigenvalues and eigenvectors can be chosen orthogonal. This +gives rise to a Fourier like description of evolution problems on graphs; an ex- +ample is the graph wave equation, a natural model for weak miscible flows on +a network, see the articles [2], [3]. This simple formalism proved very useful +for modeling the electrical grid [4] or describing an epidemic on a geographi- +cal network [5]. Finally, a different application of graph Laplacians is spectral +clustering in data science, see the review [6]. +1 + +Almost sixty years ago, Mark Kac [7] asked the question : can one Hear +the Shape of a Drum? 
In other words, does the spectrum of the Laplacian characterize the graph completely? We now know that there are isospectral graphs, so there is no unique characterization. One can, however, ask a simpler question: can one predict eigenvalues or eigenvectors from the geometry of the graph? From the literature this seems very difficult; most of the available results are inequalities, see for example the beautiful review by Mohar [8] and the extensive monograph [9].
Many of the results shown by Mohar [8] are inequalities on λ2, the first nonzero eigenvalue. This eigenvalue is related to the important maximum cut problem in graph theory, among others. Mohar [8] also gives some inequalities on λn, the maximum eigenvalue, in terms of the maximum of the sum of two degrees. Another important inequality concerns the interlacing of the spectra of two graphs on the same vertex set that differ by a single edge. However, little is known about the bulk of the spectrum, i.e. the eigenvalues between λ2 and λn. A very important step in that direction was Merris's pioneering article [10], where he introduced "Laplacian eigenvector principles" that allow one to predict how the spectrum of a graph is affected by contracting, adding or deleting edges and/or coalescing vertices. Also, Das [11] showed that connecting an additional vertex to all vertices of a graph increases all eigenvalues (except 0) by one.
Following these studies, in [12] we characterized graphs which possess eigenvectors with components ±1 (bivalent) and 0, ±1 (trivalent). This is novel because we give exact results, not inequalities. Here, we continue in this direction and focus on eigenvectors that have some zero components; we call the corresponding vertices soft nodes. Soft nodes are important because no action can be effected there on the associated mechanical system [3]. In this article, we use the properties of graphs with soft nodes, which we call soft graphs, to highlight eigenvalues/eigenvectors that can be obtained combinatorially instead of numerically. We first show that eigenvalues of graph Laplacians with weights one are integers or irrationals. Then we present well-known classical graphs whose spectrum is known exactly. We describe five graph transformations that preserve a given eigenvalue and three that shift the eigenvalue in a simple way. Among the transformations that preserve an eigenvalue, the link was explicitly introduced in the remarkable article by Merris (link principle) [10]. The articulation and the soldering were contained in the same paper; we choose to present elementary versions of these transformations. We find two new transformations that preserve an eigenvalue: the regular expansion and the replacement of a coupling by a square. We also present transformations that shift an eigenvalue in a predictable way: insertion of a soft node, addition of a soft node, and insertion of a matching. The first is new; the second and third were found by Das [11] and Merris [10], respectively.
In the last part of the article we enumerate all the small graphs, up to six vertices, that have a given eigenvalue λ and explain the relations between them using the transformations discussed previously. It is remarkable that these graphs can all be obtained from a short list of graphs; the question remains open for larger graphs. Using the transformations mentioned above, λ soft graphs can be made arbitrarily large.
The converse problem, of a λ subgraph G of a λ graph G”, is also considered. We show that the matrix coupling the two Laplacians L(G) and L(G′), where G′ = G” − G, is a graph Laplacian. If the remainder graph G′ is λ, then G” is obtained using the articulation or link transformations. It is possible that the remainder graph G′ is not λ, as long as it shares an eigenvector with G; then the two may be related by adding one or several soft nodes to G′. Finally, an argument shows that if G′ is not λ and does not share an eigenvector with G, the problem has no solution. We finish the article by examining the λ soft graphs for λ = 1, 2, . . . , 6 and emphasize minimal λ soft graphs as generators of these families, using the transformations above.
The article is organized as follows. Section 2 introduces the main definitions. In section 3 we consider special graphs (chains, cycles, cliques, bipartite graphs) whose Laplacian spectrum is well known. The graph transformations preserving an eigenvalue are presented in section 4. Section 5 introduces graph transformations which shift eigenvalues. Finally, section 6 introduces λ soft graphs, discusses λ subgraphs and presents a classification of graphs up to six vertices.

2 The graph Laplacian: notation, definitions and properties

We consider a graph G(V, E) with a vertex set V of cardinality n and an edge set E of cardinality m, where n, m are finite. The graph is assumed connected, with no loops and no multiple edges. The graph Laplacian matrix [9] is the (n, n) matrix L(G), or simply L, such that
Lij = −1 if the edge ij exists and 0 otherwise; Lii = mi, the degree of i, (1)
where the degree of i is the number of edges connected to vertex i.
The matrix L is symmetric, so it has real eigenvalues and one can always find a basis of orthogonal eigenvectors. Specifically, we arrange the eigenvalues λi as
λ1 = 0 ≤ λ2 ≤ · · · ≤ λn. (2)
We label the associated eigenvectors v1, v2, . . . , vn. We have the following properties:
• v1 = ˆ1, the vector all of whose components are 1.
• Let v^i_k be the kth component of an eigenvector v^i, i > 1. An immediate consequence of v^i being orthogonal to v^1 is Σ_k v^i_k = 0.
A number of the results we present also hold for the generalized Laplacian, where the off-diagonal entries are Lij = −wij for positive weights wij and Lii = Σ_{j∼i} wij. We will indicate which as we present them.
Regular graphs. The graph Laplacian can be written as L = D − A, where A is the adjacency matrix and D is the diagonal matrix of the degrees. We recall the definition of a regular graph.
Definition 2.1 (Regular graph) A graph is d-regular if every vertex has the same degree d.
For regular graphs D = d Idn, where Idn is the identity matrix of order n. For these graphs, all the properties obtained for L in the present article carry over to A.
We will use the following definitions.
Definition 2.2 (Soft node) A vertex s of a graph is a soft node for an eigenvalue λ of the graph Laplacian if there exists an eigenvector x for this eigenvalue such that xs = 0.
An important result due to Merris [10] is
Theorem 2.3 Let G be a graph with n vertices. If 0 ̸= λ < n is an eigenvalue of L(G), then any eigenvector affording λ has component 0 on every vertex of degree n − 1.
Definition 2.4 (k-partite graph) A k-partite graph is a graph whose vertices can be partitioned into k different independent sets so that no two vertices within the same set are adjacent.
Definition 2.5 (cycle) A cycle is a connected graph where all vertices have degree 2.
Definition 2.6 (chain) A chain is a connected graph where two vertices have degree 1 and all other vertices have degree 2.
Definition 2.7 (clique) A clique, or complete graph Kn, is a simple graph in which every two vertices are connected.
In the article we sometimes call a configuration a vertex-valued graph whose values correspond to an eigenvector of the graph Laplacian.

2.1 Eigenvalues are integers or irrationals

We have the following result.
Theorem 2.8 If the eigenvalue λ is an integer, then there exist integer eigenvectors.
To see this, consider the linear system (L − λI)X = 0. It can be solved using Gaussian elimination, which involves only rational operations, so a nonzero solution X can be chosen with rational entries. Multiplying X by the product of the denominators of its entries, we obtain an eigenvector with integer entries.
We now show that the eigenvalues of a graph Laplacian are either integers or irrationals. We use the following rational root lemma on the roots of polynomials with integer coefficients, see for example [13].
Lemma 2.9 (Rational root) Consider the polynomial equation
a_n x^n + a_{n−1} x^{n−1} + · · · + a_0 = 0,
where the coefficients a_i are integers. Then any rational solution x = p/q, where p and q are relatively prime, is such that p divides a_0 and q divides a_n.
A consequence of this is
Theorem 2.10 The eigenvalues of a graph Laplacian are either integers or irrationals.
Proof. Consider the equation given by the characteristic polynomial of the graph Laplacian. Since the graph is connected, 0 is a simple eigenvalue, so the equation has the form
a_n x^n + a_{n−1} x^{n−1} + · · · + a_1 x = 0,
with integer coefficients and a_n = ±1. Assume an eigenvalue has the form x = p/q with p, q relatively prime integers. Then, from the lemma above, q divides a_n. Since a_n = ±1, we get q = 1, so that x = p is an integer. □
The fact that some graphs have integer spectrum was discussed by Grone and Merris [14]. Many of their results are inequalities for λ2 and λn−1. Our results complement their approach.

3 Special graphs

3.1 Cliques and stars

The clique Kn has the eigenvalue n with multiplicity n − 1, together with the eigenvalue 0. The eigenvectors for the eigenvalue n can be chosen as vk = e1 − ek, k = 2, . . . , n. To see this, note that
L = n In − 1,
where In is the identity matrix of order n and 1 is the (n, n) matrix all of whose elements are 1.
A star Sn on n vertices is a tree such that one vertex, say vertex 1, is connected to all the others. For a star Sn, the eigenvalues and eigenvectors are
• λ = 1, multiplicity n − 2, eigenvectors e2 − ek, k = 3, . . . , n;
• λ = n, multiplicity 1, eigenvector (n − 1) e1 − Σ_{k=2}^{n} ek;
• λ = 0, multiplicity 1, eigenvector ˆ1.

3.2 Bipartite and multipartite graphs

Consider the complete bipartite graph Kn1,n2. Its Laplacian has the block form
L = [ n2 In1 , −J ; −J^T , n1 In2 ],  (3)
where the top left block has size n1 × n1, the bottom right block has size n2 × n2, and J is the n1 × n2 matrix all of whose entries are 1. The eigenvalues, with their multiplicities written as exponents, are
0^1, n1^{n2−1}, n2^{n1−1}, (n1 + n2)^1.
Eigenvectors for n1 can be chosen as e_{n1+1} − e_i (i = n1 + 2, . . . , n1 + n2). The eigenvector for n = n1 + n2 is (1/n1, . . . , 1/n1, −1/n2, . . . , −1/n2)^T.
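As a quick sanity check of the spectra quoted in this section, the following short Python sketch builds the Laplacians of a clique, a star and a complete bipartite graph and confirms the eigenvalues and the star eigenvector numerically. This is an added illustration, not part of the original development; it assumes numpy is available, and the helper name laplacian_from_edges is ours.

import numpy as np

def laplacian_from_edges(n, edges):
    # Graph Laplacian L = D - A on vertices 0, ..., n-1, all edge weights equal to 1.
    L = np.zeros((n, n))
    for i, j in edges:
        L[i, j] -= 1.0
        L[j, i] -= 1.0
        L[i, i] += 1.0
        L[j, j] += 1.0
    return L

n = 5
clique = [(i, j) for i in range(n) for j in range(i + 1, n)]
star = [(0, k) for k in range(1, n)]
n1, n2 = 2, 3
bipartite = [(i, n1 + j) for i in range(n1) for j in range(n2)]

print(np.linalg.eigvalsh(laplacian_from_edges(n, clique)))            # [0, 5, 5, 5, 5]
print(np.linalg.eigvalsh(laplacian_from_edges(n, star)))              # [0, 1, 1, 1, 5]
print(np.linalg.eigvalsh(laplacian_from_edges(n1 + n2, bipartite)))   # [0, 2, 2, 3, 5]

# The vector (n-1) e_1 - sum_{k>=2} e_k affords the eigenvalue n for the star.
v = np.array([n - 1.0] + [-1.0] * (n - 1))
print(np.allclose(laplacian_from_edges(n, star) @ v, n * v))          # True

# Components of eigenvectors for nonzero eigenvalues sum to zero
# (orthogonality to the constant eigenvector).
w, V = np.linalg.eigh(laplacian_from_edges(n, star))
print(np.allclose(V[:, 1:].sum(axis=0), 0.0))                         # True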
+Similarly, the spectrum of a multipartite graph Kn1,n2,...np is +01, +(n − n1)n1−1, +(n − n2)n2−1, . . . , +(n − np)np−1, +np. +The eigenvectors associated to n − n1 are composed of 1 and −1 in two vertices +of part 1 padded with zeros for the rest. +6 + +3.3 +Cycles +For a cycle, the Laplacian is a circulant matrix, therefore its spectrum is well- +known. The eigenvalues are +µk = 4 sin2 +�(k − 1)π +n +� +, +k = 1, . . . , n. +(4) +They are associated to the complex eigenvectors vk whose components are +vk +j = exp +�i(j − 1)(k − 1)2π +n +� +, j = 1, . . . n. +(5) +The real eigenvectors wk, xk are, +wk = (0, sin(ak), sin(2ak), . . . , sin((n − 1)ak))T , +(6) +xk = (1, cos(ak), cos(2ak), . . . , cos((n − 1)ak))T , +(7) +ak = 2(k − 1)π +n +(8) +Ordering the eigenvalues, we have +λ1 = µ1 = 0, +(9) +λ2 = λ3 = µ2, +(10) +λ2k = λ2k+1 = µk+1, +(11) +. . . +(12) +For n = 2p + 1 +λ2p = λ2p+1 = µp+1 +For n = 2p +λ2p = µp = 4 +is an eigenvalue of multiplicity 1; an eigenvector is (1, −1, . . ., 1, −1)T. In all +other cases, the eigenvalues have multiplicity two so that all vertices are soft +nodes. +Remark that the maximum number of 0s is n/2. To see this, note that if +two adjacent vertices have value 0 then their neighbors in the cycle must have +0 as well and we only have 0s , but the null vector is not an eigenvector. This +means that we have at most n/2 0s. This bound is reached for n even. +3.4 +Chains +For chains Cn, there are only single eigenvalues, they are [15] +λk = 4 sin2(π(k − 1) +2n +) , k = 1, . . . , n. +(13) +7 + +The eigenvector vk has components +vk +j = cos +�π(k − 1) +n +(j − 1 +2) +� +, j = 1, . . . n. +(14) +Obviously the cosine is zero if and only if: +(k − 1)(2j − 1) = n(1 + 2m), +(15) +where m is an integer. There is no solution for n = 2α, for α a positive integer. +Apart from this case, there is always at least one soft node. If n is a prime +number, the middle vertex j = (n + 1)/2 is the only soft node. For k odd, all +vertices j such that 2j − 1 divides n have a zero value, including the middle +vertex. +For n odd, chains and cycles share (n−1)/2 eigenvalues and eigenvectors. To +see this consider a chain with n = 2p + 1. All k = 2q + 1 give a chain eigenvalue +λk = 4 sin2( +πq +2p+1) that is also a cycle eigenvalue. The eigenvector components +vq +j are such that vq +1 = vq +2p+1. +4 +Transformations preserving eigenvalues +In this section, we present four main transformations of graphs such that one +eigenvalue is preserved. These are the link between two vertices, the articula- +tion, the soldering and the contraction/expansion. The first three transforma- +tions are in the literature in a general form; we choose to present them in their +most elementary form. +Furthermore, these transformations will all be unary, they act on a single graph. +Binary transformations can be reduced to unary transformations for non con- +nected graphs. +Using these transformations we can generate new graphs that have a soft +node, starting from minimal graphs having soft nodes. +4.1 +Link between two equal vertices +An important theorem due to Merris [10] connects equal component vertices. +Theorem 4.1 Link between two vertices : Let λ be an eigenvalue of L(G) for +an eigenvector x. If xi = xj then λ is an eigenvalue of L(G′) for x where the +graph G′ is obtained from G by deleting or adding the edge e = ij. +This transformation preserves the eigenvalue and eigenvector. +It applies to +multiple graphs. Fig. 1 shows examples of the transformation. 
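The closed-form eigenvalues of cycles and chains can be checked in the same way. The sketch below is again our own illustration (assuming numpy); it compares the numerically computed spectra with formulas (4) and (13).

import numpy as np

def laplacian_from_edges(n, edges):
    # Graph Laplacian with unit weights on vertices 0, ..., n-1.
    L = np.zeros((n, n))
    for i, j in edges:
        L[i, j] -= 1.0
        L[j, i] -= 1.0
        L[i, i] += 1.0
        L[j, j] += 1.0
    return L

n = 7
cycle = [(k, (k + 1) % n) for k in range(n)]
chain = [(k, k + 1) for k in range(n - 1)]

mu_cycle = np.sort(4 * np.sin(np.arange(n) * np.pi / n) ** 2)        # formula (4), with k-1 = 0, ..., n-1
mu_chain = np.sort(4 * np.sin(np.arange(n) * np.pi / (2 * n)) ** 2)  # formula (13)

print(np.allclose(np.linalg.eigvalsh(laplacian_from_edges(n, cycle)), mu_cycle))  # True
print(np.allclose(np.linalg.eigvalsh(laplacian_from_edges(n, chain)), mu_chain))  # True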
+8 + +L2S +−1 +−1 +1 +1 +Ch6 +L2S +L2S +−1 +−1 +1 +1 +L2S +y6 +C +−1 +−1 +1 +1 +−1 +−1 +1 +1 +Ch +2 +3 +−1 +1 +1 +−1 +−1 +−1 +1 +1 +Figure 1: Example of the transform : link between two equal vertices. +We have the following corollary of the theorem. +Theorem 4.2 Let λ be an eigenvalue of two graphs G1 and G2 for respective +eigenvectors x1, x2 with two vertices i, j, such that x1 +i ̸= 0 or x2 +j ̸= 0 . Then the +graph G(V1 ∪ V2, E1 ∪ E2 ∪ ij) affords the eigenvector y = x2 +j +�x1 +0 +� ++ x1 +i +� 0 +x2 +� +for λ. +This allows to generate many more graphs that have an eigenvalue λ. +4.2 +Articulation +An elementary transformation inspired by Merris’s principle of reduction and +extension [10] is to add a soft node to an existing soft node. This does not +change the eigenvalue. We have the following result. +Theorem 4.3 Articulation (A) : Assume a graph G(V, E) with n vertices +where x is an eigenvector such that xi = 0 for an eigenvalue λ. +Then, the +extension x′ of x such that x′ +1:n = x1:n and x′ +n+1 = 0 is an eigenvector for +λ for the Laplacian L(G′) where G′(V ′, E′) such that V ′ = V ∪ (n + 1) and +E′ = E ∪ i(n + 1). +9 + +6 +−2 +1 +1 +A +23 +1 +1 +−2 +5. +5. +Figure 2: Example of the articulation property. The large dot corresponds to a +soft node. +The general case presented by Merris [10] amounts to applying several times +this elementary transformation. +The transformation is valid for graphs with arbitrary weights and the extended +edges can have arbitrary weights. +Fig. 2 illustrates this property on the two graphs labeled 5.6 and 5.23 in +the classification given in [1]. An immediate consequence of this elementary +transform is that any soft node can be extended into an arbitrarily large graph +of soft nodes while preserving the eigenvalue and extending the eigenvector in a +trivial way. Fig. 3 shows two graphs that have the same eigenvalue λ = 1 and +that are connected by the articulation transform. +1 +1 +−2 +6 +−2 +1 +1 +A +5. + + + + +Figure 3: Two graphs connected by the articulation transform. +4.3 +Soldering +A consequence of the contraction principle of Merris [10] is that coalescing two +soft nodes of a graph leaves invariant the eigenvalue. This is especially important +because we can ”solder” two graphs at a soft node. +10 + +Theorem 4.4 Soldering : Let x be an eigenvector affording λ for a graph G. +Let i and j be two soft nodes without common neighbors. Let G′ be the graph +obtained from G by contracting i and j and x′ be the vector obtained from x by +deleting its jth component. Then x′ is an eigenvector of L(G′) for λ. +−1 +−1 +−1 +−1 +1 +1 +1 +1 +−1 +−1 +−1 +−1 +1 +1 +1 +1 +1 +−1 +1 +−1 +1 +−1 +1 +−1 +6.73 +1 +1 +−1 +−1 +−1 +1 +−1 +1 +−1 +1 +1 +−1 +Ch 6 +1 +1 +−1 +−1 +Ch 6 +1 +1 +−1 +−1 +1 +−1 +1 +−1 +1 +−1 +Ch 3 +1 +−1 +Ch 3 +1 +−1 +Ch 3 +1 +−1 + + + + +Figure 4: Examples of the soldering transform. +This transformation is valid for graphs with arbitrary weights. +4.4 +Regular expansion of a graph +We have the following theorem. +Theorem 4.5 Let x be an eigenvector of a graph G for λ and let i be a vertex +connected only to p soft nodes. Let G′ be the graph obtained from G by replacing +i by a d-regular graph whose k vertices are all connected to the p soft nodes. Then +λ = p and an eigenvector x′ of G′ is formed by assigning to the new vertices, +the value x′ +j = xi/k. +Proof. Without loss of generality, we can assume that i = n and that the p soft +11 + +nodes are n − p + 1, . . . , n − 1. We have + + + + +. . . +. . . +. . . +. . . +. . . +. . . 
+. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +. . . +0 +. . . +0 +−1 +. . . +−1 +p + + + + + + + + +. . . +0 +0 +xn + + + + = λ + + + + +. . . +0 +0 +xn + + + + +The nth line reads +pxn = λxn +so that λ = p. The n − 1th line reads +α + (−1)xn = pxn−1 = 0 +where α is the sum of the other terms. +Let us detail the eigenvector relation for the Laplacian for G′. Consider any +new vertex j linked to the p soft nodes and to d new nodes. The corresponding +line of the eigenvector relation for the Laplacian for G′ reads +(d + p)x′ +j + +� +i∼j,i≥n +(−1)x′ +i = λ′x′ +j. +This implies +(d + p − λ′)x′ +j = +� +i∼j,i≥n +x′ +i. +An obvious solution is +λ′ = λ = p, +x′ +i = x′ +n ∀i ≥ n + 1. +The value x′ +n is obtained by examining line n − 1. We have +α + +n−k−1 +� +i=n +(−1)x′ +i = 0 +so that +x′ +n = xn +k . +In fact, we can get all solutions by satisfying the two conditions +∀j ≥ n dx′ +j = +� +i∼j +x′ +i, +xn = +� +i≥n +x′ +i. +(16) +□ +Fig. 5 shows examples of expansion from a single soft node for different +values of d. Here the eigenvalue is 1. Fig. 6 shows examples of expansion from +two soft nodes. The eigenvalue is 2. For d = 2, the values at the edges at the +bold edges are such that their sum is equal to 1. For d = 2, the values at the +12 + +triangle are all equal to t, the same holds for the square with a value s. These +values verify 3t + 4s = 1. +−1 +1 +Ch 3 +/2 +1 +/2 +1 +−1 +/ +1 4 +/ +1 4 +/ +1 4 +/ +1 4 +/2 +1 +/2 +1 +−1 +/ +1 3 +/ +1 3 +/ +1 3 +/ +1 9 +/ +1 9 +/ +1 9 +/9 +2 +/9 +2 +/9 +2 +−1 +d= 0 +d=1 +−1 +−1 +d= 2 +E +E +E +Figure 5: Examples of expansion from a single soft node. +/2 +1 +/1 4 +/1 4 +/2 +1 +/2 +1 +d= 0 +−1 +−1 +E +−1 +1 +E +E +E +−1 +−1 +d=2 +d=1 + + + + +Figure 6: Examples of expansion from two soft nodes. For d = 2, the values +at the triangle are all equal to t, the same holds for the square with a value s. +These values verify 3t + 4s = 1. +13 + +4.5 +Replace coupling by square +We have the following transformation that leaves the eigenvalue unchanged [12]. +Theorem 4.6 (Replace an edge by a soft square) +Let x be an eigenvector of the Laplacian of a graph G for an eigenvalue λ. Let +G′ be the graph obtained from G by deleting a joint ij such that xi = −xj and +adding two soft vertices k, l ∈ V (G′) for the extension x′ of x (i.e. x′ +m = xm +for m ∈ V (G) and x′ +k = x′ +l = 0) and the four edges ik, kj, il, lj. Then, x′ is an +eigenvector of the Laplacian of G′ for the eigenvalue λ. +This result was proved in [12] for a graph with weights 1. Here we generalize it +to a graph with arbitrary weights. +Proof. +The eigenvalue relation at vertex i reads +(di − λ)xi = +� +m∼i,m̸=j +wi,mxm + wi,jxj +Since xi = −xj, this implies +(di + wi,j − λ)xi = +� +m∼i,m̸=j +wi,mxm. +Introducing the two new vertices k, l such that x′ +k = x′ +l = 0 connected to i by +edges of weights wi,k = αwi,j, +wi,l = (1 − α)wi,j, the relation above leads to +(di + wi,k + wi,l − λ)x′ +i = +� +m∼i +wi,mx′ +m + wi,kx′ +k + wi,lx′ +l, +which shows that x′ is eigenvector of the new graph. +□ +See Fig. 7 for an illustration of the theorem. +−1 +1 +1 +−1 +Figure 7: Replacement of coupling by a square, in both cases the eigenvalue is +λ = 2. +14 + +5 +Transversality : change of eigenvalue +Here we present operators that change the eigenvalue of a graph Laplacian in +a predictable way. The operators shift the eigenvalue λ to λ + 1 for the first +two and λ + 2 for the third one. 
At the end of the section we introduce the +eigenvalue of a product graph. +5.1 +Inserting soft nodes +Theorem 5.1 Let x be an eigenvector of a graph G with weights 1 for λ. As- +sume we can pair the non zero components of x as {i, j} where xi = −xj non +zero. Let G′ be the graph obtained from G by including k soft nodes between +each pair {i, j}. The vector x′ so obtained is an eigenvector of the Laplacian of +G′ for eigenvalue λ + k. +Proof. Let i, j ∈ V (G) be a pair such that xi = −xj. The eigenvector equation +reads +dixi − +� +m∼i +xm = λxi. +Introducing k new vertices x′ +p = 0, +p = 1, . . . k we can write the relation as +(di + k)x′ +i − +� +m∼i +x′ +m = (λ + k)x′ +i. +This shows that x′ is an eigenvector for the new graph. +□ +λ= +λ= +λ= +λ=1 +2 +λ= +λ=1 +−1 +1 +−1 +1 +−1 +1 +2 +λ= +λ=3 +−1 +1 +−1 +1 +−1 +1 +−1 +1 +3 +−1 +1 +1 +−1 +1 +1 +−1 +−1 +2 +3 +−1 +1 +−1 +1 +Figure 8: Example of the action of inserting a soft node. +Fig. 8 shows an example of the action of inserting a soft node. +15 + +When the graph is weighed, the result is still valid. Consider that we add +only one soft vertex connected to i by a weight wi,k. The eigenvalue of the new +graph is λ + wi,k. +This can transform a graph with an integer eigenvalue to a graph with an +irrational eigenvalue. +5.2 +Addition of a soft node +Connecting a soft node to all the vertices of a graph augments all the non zero +eigenvalues by 1. This result was found by Das [11]. We recover it here and +present it for completeness. +Theorem 5.2 Addition of a soft node : Let G(V, E) be a graph affording +an eigenvalue λ ̸= 0 for an eigenvector x. Then the new graph G′ obtained +by adding a node connected to all the nodes of G has eigenvalue λ + 1 for the +eigenvector x′ obtained by extending x by a zero component. +See Fig. 9 for examples. +Proof. Assume λ to be an eigenvalue with eigenvector v for the Laplacian L(G) +of a graph G with n vertices. Now add an extra vertex n + 1 connected to all +vertices of G and form L(G ∪ {n + 1}). We have the following identity + + + + + + +| +−1 +L(G) + In +| +−1 +| +−1 +− − − − − − − − − − − − − +| +− − −− +−1 − 1, · · · − 1 +| +n + + + + + + + + + + + + +v +−− +0 + + + + + + += (λ + 1) + + + + + + +v +−− +0 + + + + + + +which proves the statement. +□ +λ= +1 +−1 +1 +1 +λ= 2 +1 +−1 +1 +λ= +λ= +−1 + + + + +−1 +2 +3 +Figure 9: Examples of the addition of a soft node. +Important examples are the ones formed with the special graphs considered +above. There, adding a vertex to an n − 1 graph, one knows explicitly n − 1 +eigenvectors and eigenvalues. +16 + +The theorem 3.2 by Das [11] can be seen as a direct consequence of adding +a soft node and an articulation to a graph. +5.3 +Inserting a matching +First we define perfect and alternate perfect matchings. +Definition 5.3 (Perfect matching) A perfect matching of a graph G is a +matching (i.e., an independent edge set) in which every vertex of the graph is +incident to exactly one edge of the matching. +Definition 5.4 (Alternate perfect matching) An alternate perfect match- +ing for a vector v on the nodes of a graph G is a perfect matching for the nonzero +nodes such that edges eij of the matching satisfy vi = −vj +(̸= 0). +We have the following result [12] inspired by the alternating principle of +Merris [10]. +Theorem 5.5 (Add/Delete an alternate perfect matching) Let v be an +eigenvector of L(G) affording an eigenvalue λ. Let G′ be the graph obtained +from G by adding (resp. 
deleting) an alternate perfect matching for v. Then, v +is an eigenvector of L(G′) affording the eigenvalue λ + 2 (resp. λ − 2). +This is a second operator which shifts eigenvalues by ±2. Examples are given +in Fig. 10. +17 + +λ= +−1 +1 +−1 +1 +3 +λ= +λ=1 +λ= 3 +−1 +1 +−1 +1 +λ= 3 +λ=1 +−1 +1 +1 +−1 +1 +−1 +1 +−1 +−1 +1 +−1 +1 +−1 +1 +−1 +1 +5 + + + + +Figure 10: Examples of inserting a matching. +5.4 +Cartesian product +The cartesian product G□H of two graphs G = (V, E) and H = (W, F) +has set of vertices V × W = {(v, w), v ∈ V, w ∈ W}. +It’s set of edges is +{{(v1, w1), (v2, w2)}} such that v1 v2 ∈ V and w1w2 ∈ W. We have the follow- +ing result, see Merris [10]. +Theorem 5.6 If x is an eigenvector of G affording µ and y is an eigenvector +of H affording ν, then the Kronecker product of x andy , x⊗y is an eigenvector +of G□H for the eigenvalue µ + ν. +Fig. 11 illustrates the theorem. +18 + +Ch3 +1 +−1 +Ch3 +Ch3 +Ch3 +−1 +1 +1 +−1 +1 +−1 + + + + +Ch3 +−1 +Ch3 + + + +−1 +1 +1 +−1 +1 + +1 +−1 +Cy4 +Cy4 +Figure 11: Cartesian product of two chains 3 (left) and of a cycle 4 and a chain +3 (right). +Important examples are the ones formed with the special graphs considered +above. There, one knows explicitly the eigenvectors and eigenvalues. For ex- +ample, the cartesian product Cn × Cm of two chains Cn and Cm with n and m +nodes respectively has eigenvalues +λi,j = λi + λj, +where λi (resp. λj) is an eigenvalue for Cn (resp. Cm). The eigenvectors are +vi,j = cos[π(i − 1) +n +(p − 1 +2)] cos[π(j − 1) +m +(q − 1 +2)], +where i, p ∈ {1, . . ., n}, +j, q ∈ {1, . . . , m}. +5.5 +Graph complement +We recall the definition of the complement of a graph G. +Definition 5.7 (Complement of a graph ) Given a graph G(V, E) with n ver- +tices, its complement Gc is the graph Gc(V, Ec) where Ec is the complement of +E in the set of edges of the complete graph Kn. +We have the following property, see for example [1]. +Theorem 5.8 If x is an eigenvector of a graph G with n vertices affording +λ ̸= 0, then x is an eigenvector of Gc affording n − λ. +An example is shown in Fig. 12. The eigenvalues and eigenvectors are given +in table 1. +19 + +1 +2 +3 +4 +5 +6 +1 +4 +6 +2 +5 +3 +6.35 +6.101 + + + + +Figure 12: Graph 6.35 (left) in the classification [1] and its complement 6.101 +(right). +6.35 +5.2361 +5. +4 +3 +0.7639 +0 +6.101 +0.7639 +1 +2 +3 +5.2361 +0 +0.51167 +0.70711 +0. +0.18257 +-0.19544 +0.40825 +-0.31623 +0. +0.70711 +-0.36515 +-0.31623 +0.40825 +-0.31623 +0. +-0.70711 +-0.36515 +-0.31623 +0.40825 +0.51167 +-0.70711 +0. +0.18257 +-0.19544 +0.40825 +-0.51167 +0. +0. +0.73030 +0.19544 +0.40825 +0.12079 +0. +0. +-0.36515 +0.82790 +0.40825 +Table 1: Eigenvalues (top lines) and eigenvectors for the two complementary graphs +6.35 and 6.101 shown in Fig. 12 +Many times, Gc is not connected. An example where Gc is connected is the +cycle 6.... +6 +λ-soft graphs +6.1 +Definitions and properties +We introduce the notions of λ, λ soft and λ soft minimal graphs. The trans- +formations of the previous section will enable us to prove the relation between +these two types of graphs. +Definition 6.1 A graph G affording an eigenvector X for an eigenvalue λ is +λ. +20 + +Definition 6.2 A λ graph G affording an eigenvector X for the eigenvalue λ +is λ soft if one of the entries of X is zero. +Definition 6.3 A graph G affording an eigenvector X for an eigenvalue λ is λ +minimal if it is λ and minimal in the sense of inclusion. +Clearly, for a given λ, there is at least one λ minimal graph. 
As an example the +1 soft minimal graph is shown below. +Ch 3 + + + + +6.2 +λ subgraph +In the following section, we study the properties of a λ subgraph G included +in a λ graph G”(V ”, E”). Consider two graphs G(V, E) with n vertices and +G′(V ” − V, E′) such that E only connects elements of V and E′ only connects +elements of V ′. Assume two graphs G(n) and G′(n′) are included in a large +graph G” and are such that G(V, E) Assume p vertices of G are linked to p′ +vertices of G′. We label the p vertices of G, n − p + 1, . . . , n and the p′ vertices +of G′, 1, . . . , p′. We have +LX = λX, +(17) +L”X” = λX”, +(18) +where L” is the graph Laplacian for the large graph G”; L” can be written as +L” = +�L +0 +0 +L′ +� ++ + + + + +0 +0 +0 +0 +0 +a +−b +0 +0 +−bT +c +0 +0 +0 +0 +0 + + + + . +A first result is +Theorem 6.4 The square matrix δ = +� +a +−b +−bT +c +� +is a graph Laplacian. +Proof. The submatrices a, b, c have respective sizes a(p, p), b(p, p′), c(p′, p′), a +and c are diagonal and verify +aii = +p′ +� +j=1 +bij, +cii = +p +� +j=1 +bji. +(19) +In other words +aˆ1p = bˆ1p′, +cˆ1p′ = bT ˆ1p, +21 + +where ˆ1p is a p column vector of 1. +□ +At this point, we did not assume any relation between the eigenvectors X +for G and X” for G”. We have the following +Theorem 6.5 The eigenvalue relations (17,18) imply either X = X”(1 : n) or +X(1 : n − p) = 0. +Proof. For p = 1 and λ a single eigenvalue, rank(L − λI) = n − 1 so either +X = X”(1 : n) or X(1 : n − 1) = 0. +We admit the result for p > 1. +□ +We can then assume that the eigenvectors of L” have the form +L” +� +X +X′ +� += λ +� +X +X′ +� +, +where LX = λX. Substituting L”, we get +λ +�X +X′ +� += +�L +0 +0 +L′ +� �X +X′ +� ++ + + + + +0 +0 +0 +0 +0 +a +−b +0 +0 +−bT +c +0 +0 +0 +0 +0 + + + + +�X +X′ +� +. +Using the relation (17) we obtain +� +0 +0 +0 +a +� +X + +� +0 +0 +−b +0 +� +X′ = 0, +(20) +L′X′ − +�0 +bT +0 +0 +� +X + +�c +0 +0 +0 +� +X′ = λX′. +(21) +There are p non trivial equations in the first matrix equation and p′ in the +second one. Using an array notation (like in Fortran), the system above can be +written as +aX(n − p + 1 : n) − bX′(1 : p′) = 0, +(22) +−bT X(n − p + 1 : n) + cX′(1 : p′) + (L′X′)(1 : p′) = λX′(1 : p′), +(23) +(L′X′)(p′ + 1 : n′) = λX′(p′ + 1 : n′), +(24) +Extracting X from the first equation, we obtain +X(n − p + 1 : n) = a−1bX′(1 : p′), +(25) +and substituting in the second equation yields the closed system in X′ +(−bT a−1b + c)X′(1 : p′) + (L′X′)(1 : p′) = λX′(1 : p′), +(26) +(L′X′)(p′ + 1 : n′) = λX′(p′ + 1 : n′), +(27) +where we used the fact that the matrix a of the degrees of the connections is +invertible by construction. +22 + +Theorem 6.6 The matrix +∆ ≡ −bT a−1b + c, +is a generalized graph Laplacian: it is a Laplacian of a weighted graph. +Its +entries are rationals and not necessarily integers. +Proof. To prove this, note first that ∆ is obviously symmetric. We have +∆ˆ1p′ = −bT a−1bˆ1p′ + cˆ1p′ = −bT a−1aˆ1p + bT ˆ1p = 0. +This shows that the each diagonal element of ∆ is equal to the sum of it’s +corresponding row so that ∆ is a graph Laplacian. +□ From +theorem (2.10), the eigenvalues of ∆ are integers or irrationals and correspond +to eigenvectors with integer or irrational components. +We then write equations (26,27) as +( ¯∆ + L′)X′ = λX′, +(28) +where +¯∆ = +�∆ +0 +0 +0 +� +This is an eigenvalue relation for the graph Laplacian ( ¯∆ + L′). Four cases +occur. +(i) λ = 0 then X′ is a vector of equal components and X also. +(ii) λ ̸= 0 is an eigenvalue of L′. 
Then one has the following +Theorem 6.7 Assume a graph G” is λ for an eigenvector X” = (X, X′)T +and contains a λ graph G for the eigenvector X. Consider the graph G′ +with vertices V (G”) − V (G) and the corresponding edges in G”. +If G′ is λ then G” is obtained from G using the articulation or link trans- +formations. +Proof. Since λ ̸= 0 is an eigenvalue of L′, we can choose X′ an eigenvector +for λ so that L′X′ = λX′, then ∆X′ = 0. +A first possibility is X′ = 0, this corresponds to an articulation between +G and G′. +If X′ ̸= 0, L′X′ = λX′, implies that X′ is not a vector of equal components +so that X′ /∈ Null(∆). The only possibility for ∆X′ = 0 is ∆ = 0 so that +c = bT a−1b. +The term (bT a−1b)ij is +(bT a−1b)ij = +p +� +k=1 +bkibkj +akk +. +23 + +Since the matrix c is diagonal, we have +p +� +k=1 +bkibkj +akk += 0, ∀i ̸= j +Then bkibkj = 0 so that a vertex k from G is only connected to one other +vertex i or j from G′. Then p = p′. This implies aii = cii = 1, ∀i ∈ +{1, , . . . , p}. The graphs G and G′ are then connected by a number of +edges between vertices of same value. +□ +(iii) λ ̸= 0 is not an eigenvalue of L′ and L′ and ¯∆ share a common eigenvector +X′ for eigenvalues λ′ and λ − λ′ > 0. +For λ − λ′ = 1, a possibility is to connect a soft node of G to G′. For +λ − λ′ = p integer, a possibility is to connect p soft nodes of G to G′. +We conjecture that there are no other possibilities. +(iv) λ ̸= 0 is not an eigenvalue of L′ and L′ and ¯∆ have different eigenvectors. +Then there is no solution to the eigenvalue problem (28). +To see this, assume the eigenvalues and eigenvectors of L′ and ¯∆ are +respectively νi, V i, µi, W i so that +L′V i = νiV i, +¯∆W i = µiW i, +i = 1, 2, . . . n +The eigenvectors can be chosen orthonormal and we have +QV = WQ +where Q = (qj +k) is an orthogonal matrix, V and W are the matrices whose +columns are respectively V i and W i. We write +W j = +� +k +qj +kV k. +Assuming X′ exists, we can expand it as X′ = � +i αiV i Plugging this +expansion intro the relation ( ¯∆ + L′)X′ = λX′ yields +� +i + +αiνiV i + αi +� +j +qi +jµj +� +k +qj +kV k + + = +� +i +λαiνiV i +Projecting on a vector V m we get +αmνm + αm +� +j +qm +j µjqj +m = λαmνm +A first solution is αm = 0, ∀m so that X′ = 0, an articulation. If αm ̸= 0 +then we get the set of linear equations linking the νi to the µi. +� +j +qm +j µjqj +m = (λ − 1)νm, +m = 1, . . . n +Since Q is a general orthogonal matrix, the terms qm +j +are irrational in +general. Therefore we conjecture that there are no solutions. +24 + +6.3 +Examples of λ subgraphs +Using simple examples, we illustrate the different scenarios considered above. +We first consider theorem (6.7), see Fig. 13. +G +G" +G’ +−1 +1 +G +1 +3 +2 +4 +5 +6 +7 + + + + +−1 +1 +1 +G’ +G +−1 +G" +4 +5 +6 +7 +1 +3 +2 +Figure 13: Two configurations where a graph G is included in a larger graph +G” for the eigenvalue 1. +Consider the configuration on the left of Fig. 13. We have +L = + + +1 +−1 +0 +0 +1 +1 +−1 +−1 +2 + + , +L′ = + + + + +1 +−1 +0 +0 +−1 +3 +−1 +−1 +0 +−1 +1 +0 +0 +−1 +0 +1 + + + + . +(29) +Note that L and L′ have 1 as eigenvalue. Here p = 1, p′ = 3 and +a = 3, b = (1, 1, 1)T, c = + + +1 +0 +0 +0 +1 +0 +0 +0 +1 + + , +so that +∆ = + + +2 +3 +− 1 +3 +− 1 +3 +− 1 +3 +2 +3 +− 1 +3 +− 1 +3 +− 1 +3 +2 +3 + + . +The matrices ¯∆ and L′ have different eigenvectors for the same eigenvalue 1. +Choosing X′ an eigenvector of L′ for the eigenvalue 1 yields ¯∆X′ = 0. The only +solution is X′ = 0, this is an articulation. 
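The row-sum property of the coupling matrix ∆ = −bᵀa⁻¹b + c can be tested directly on a small cross-edge pattern. The sketch below is our own illustration (assuming numpy); the matrices are a hypothetical example with p = 2 and p′ = 3, not those of Fig. 13.

import numpy as np

# Cross-edge incidence between p = 2 boundary vertices of G and p' = 3 vertices of G'.
b = np.array([[1., 1., 0.],
              [0., 1., 1.]])
a = np.diag(b.sum(axis=1))   # cross-degrees on the G side
c = np.diag(b.sum(axis=0))   # cross-degrees on the G' side

delta = c - b.T @ np.linalg.inv(a) @ b
print(np.allclose(delta, delta.T))            # symmetric
print(np.allclose(delta @ np.ones(3), 0.0))   # each row sums to zero: a generalized graph Laplacian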
+25 + +For the configuration on the right of Fig. 13 we have p = p′ = 3. +a = + + +1 +0 +0 +0 +1 +0 +0 +0 +1 + + , +b = + + +1 +0 +0 +0 +0 +1 +0 +0 +0 +0 +1 +0 + + , +c = + + +1 +0 +0 +0 +1 +0 +0 +0 +1 + + , +so that ∆ = + + +0 +0 +0 +0 +0 +0 +0 +0 +0 + + . We have +LX = 1X, +(30) +L”(X, X′)T = 1(X, X′)T , +(31) +where X = (X1, X2, X3)T In this configuration, X′ is an eigenvector of L′ for +the eigenvalue 1. and we have Link connections between G and G′. +Finally, we show an example of case (iii) where G, G” are 2 soft and G′ is 1 +soft. + + + G +1 +G’ +−1 +1 +−1 + + + + +8 +7 +6 +5 +1 +2 +3 +4 +Figure 14: An example of case (iii) for eigenvalue λ = 2. +We have to solve ( ¯∆ + L′)X′ = 2X′ where +L = + + + + +2 +−1 +0 +−1 +−1 +2 +−1 +0 +0 +−1 +2 +−1 +−1 +0 +−1 +2 + + + + , L′ = + + + + +1 +0 +−1 +0 +0 +1 +−1 +0 +−1 +−1 +3 +−1 +0 +0 +−1 +1 + + + + , ¯∆ = + + +0.5 +−0.5 +0 +−0.5 +0.5 +0 +0 +0 +0 + + . +Note that the eigenvector X′ = (1, −1, 0, 0)T is shared by L′ and ¯∆ so that +( ¯∆ + L′)X′ = 2X′. +The transformations introduced in the two previous sections enable us to +link the different members of a given class. To summarize, we have +• Articulation : one can connect any graph G2 to the soft nodes of a given +graph G1 and keep the eigenvalue. The new graph G1 ∪G2 has soft nodes +everywhere in G2. +26 + +• Link : introducing a link between equal nodes does not change the eigen- +value and eigenvector. +• Contraction of a d-regular graph linked to a soft node. To have minimal +graphs in the sense of Link we need to take d = 0. +• Soldering : one can connect two graphs by contracting one or several soft +nodes of each graph. +In the next subsections we present a classification of small size λ soft graphs for +different λs. +6.4 +1-soft graphs +1 +1 +−2 +5.8 +2 +1 +1 +4 +−8 +6.107 +1 +1 +−1 +−1 +5.28 +1 +1 +−4 +2 +5.28 +2 +1 +1 +−2 +−2 +6.107 +2 +2 +2 +−3 +−3 +6.107 +Ch 3 +1 +−1 +5.3 +1 +1 +1 +−3 +5.28 +1 +1 +2 +2 +−6 +6.107 +1 +1 +1 +1 +−4 +6.107 +Figure 15: 1s graphs: graphs generated by expansion. +Fig. 15 shows some of the 1s graphs generated by expansion. Note the variety +of possibilities. +27 + +1 +−1 +1 +−1 +Ch 3 +1 +−1 +1 +−1 +1 +−1 +5.3 +5.8 +5.28 +5.29 +1 +−1 +1 +−1 +1 +−1 +6.107 +6.108 +6.109 +6.111 +Figure 16: 1s graphs: graphs generated by articulation +Fig. 16 shows some of the 1s graphs generated by articulation. The 1, 0, −1 +configuration remains clearly visible. +28 + +Ch 6 +1 +1 +−1 +−1 +y6 +C +−1 +−1 +1 +1 +112 +79 +1 +1 +1 +1 +−4 +1 +−1 +107 +8 +2 +1 +1 +1 +−3 +1 +−1 +109 +6. +8 +1 +−1 +5. +Ch 3 +1 +−1 +8 +2 +1 +1 +−1 +−1 +6 +5. +23 +1 +1 +−2 +5. +97 +1 +1 +−26. +79 +1 +1 +−1 +−1 +6. +−3/ 2 +−3/ 2 +1 +1 +1 +36 +6. +4 +− +1 +1 +1 +1 +6.107 +−1 +1 +6.101 +A +C +C +C +C +Link +6.92 +6. +6 + 0 +1 +6. +5. +6. +6.95 +6.94 +−1 +1 +6. +6. +101 +108 +6.99 +A +A +C +−2 +1 +1 +5.8 +5.3 +1 +−1 +5.28 +5.23 +Link +C +A +A +C +5.28 +1 +−1 +1 +−1 +5.23 +5.20 +A +6.97 +A +5.18 +5.23 +5.14 +C +6.53 +6.64 +6.78 +6.75 +6.58 +6.56 +−1 +1 +1 +5.28 +1 +−1 +−1 +6.109 +− +1 +6.107 +−1 +6.94 +6.78 +6.75 +6.53 +6.108 +6.10 +6.19 +6.33 +6.38 +6.36 +6.61 +6.79 + + + + +6.111 +Figure 17: 1-soft graphs. +The soft nodes are in boldface. +We only present +symmetric expansions so that links are possible. +Fig. 17 shows the 1s graphs with at most 6 vertices. Notice how they are +linked by articulation (A), expansion/contraction (C) and links and can all be +obtained from the graph 5.3 (chain 3). The connection Ch3 - 28 is a contraction +of two Ch3 chains. 
Connecting two 3 chains Ch3 with an Link transformation +we obtain a chain 6 Ch6. One can also go from Ch6 to 23 by soldering the two +soft nodes. +6.5 +2-soft graphs +Fig. 18 shows some of the 2s graphs generated by expansion of the 5.7 graph. +29 + +−1 +2 +1 +1 +1 +1 +2 +−4 +−1 +−1 +−1 +6.73 +6.73 +5.22 +−1 +1 +5.7 +−3 +1 +1 +1 +6.73 +Figure 18: 2s graphs: graphs generated by expansion. +Similarly Fig. 19 shows some of the 2s graphs generated by articulation from +the same graph. +−1 +1 +−1 +1 +−1 +1 +−1 +1 +5.7 +−1 +1 +6.101 +6.103 +6.104 +5.26 +Figure 19: 2s graphs: graphs generated by articulation +Fig. 20 shows all 2s graphs with at most 6 vertices. We included graph 5.1 +because with a link it gives configuration 6.104. Notice how all graphs can be +generated from 5.5 and 5.1. +30 + +1 +−1 +5.1 +1 +−1 6.103 +1 +−1 +5.52 +C +A +Cy +1 +−1 +−1 +−1 +1 +1 +−1 + + +1 +1 +−2 +5.12 +5.17 +5.5 +6.101 +6.88 +6.101 +A +1 +−1 +A +6.104 +A + +−1 −1 +1 +6.104 +1 +A +6.93 +6.82 +6.93 +6.103 + + + + +5.15 +6.57 +6.76 +6.93 + +6.75 +6.49 +6.31 +6.13 +−2 +1 +1 +1 +−1 +1 +1 +1 +−3 +6.73 +6.73 +6.73 +1 +−1 5.26 +5.7 +5.22 +−1 +1 +6.91 +1 +−1 6.81 +5.18 +6.76 +Figure 20: 2-soft graphs +6.6 +3-soft graphs +Fig. 21 shows a 3s graph generated by expansion of graph 5.22. +−1 +1 +1 +1 +−2 +5.22 +6.52 +Figure 21: 3s graphs: graphs generated by expansion. +31 + +Fig. 22 shows some 3s graphs generated by articulation on graphs 5.2 and +5.22. +1 +−1 +1 +−1 +5.6 +1 +−1 +5.23 +1 +−1 +1 +−1 +1 +−1 +1 +−1 +1 +−1 +1 +−1 +5.2 +5.25 +6.100 +6.99 +6.97 +6.94 +5.22 +−1 +1 +−1 +1 +6.90 +−1 +1 +−1 +1 +−1 +1 +Figure 22: 3s graphs: graphs generated by articulation +32 + +−1 +6.100 +1 +5.17 +5.13 +5.11 +−1 5.2 +1 + + + + + + +−1 +−1 +−1 +−1 +6.99 +5.6 +5.25 +6.97 +1 +1 +1 +1 +−1 +1 +6.94 +6.88 +−1 +5.23 +1 +5.22 +−1 +−1 +6.91 +1 +1 +5.3 +1 +1 +−2 +1 +1 +−2 +1 +1 1 +−1 +−2 +−2 +2 +−1 +1 +−1 +1 +5.20 +5.16 +1 +−1 +1 +−1 +6.106 +6.99 +6.100 + + +Figure 23: 3-soft graphs. +Fig. 23 shows all 3s graphs with at most 6 vertices. Notice how they are +generated by graphs 5.2, 5.22 and 5.3. Graph 5.20 is the soldering of two graphs +5.2 . +6.7 +4-soft graphs +Fig. 24 shows some 4s graphs generated by articulation on the graph 5.3. +33 + +−1 +1 +6.82 +−1 +1 +6.78 +−1 +1 +5.5 +−1 +1 +5.19 +−1 +1 +6.80 +Figure 24: 4s graphs: graphs generated by articulation +Fig. 25 shows the 4s graphs with at most 6 vertices. Notice how they are +generated from graphs 5.5 (2 configurations) and 6.93. The graph 5.7 is included +to show its connection to 6.93 (replacing a matching by a square). +34 + +−1 +1 +6.78 +−1 +1 +6.80 +−1 +1 +A +A +−1 +1 +5.5 +1 +5.5 +1 +−2 +−1 +1 +5.5 +−1 +5.7 +1 +−1 +1 +−1 +1 +−1 +1 +6.93 +−1 +1 +5.19 + +6.82 +A +A +−1 +1 +5.17 +A +−1 +1 +6.73 +1 +1 +−2 +1 +1 +−2 +5.18 +6.81 +A +A +5.14 +6.48 6.31 + + + +Figure 25: 4-soft graphs. +6.8 +5-soft graphs +Fig. 26 shows 5s graphs with at most 6 vertices. Notice how they stem from +graphs 6.70, 5.13 and two configurations of 5.15. +35 + +1 +1 +−2 5.13 +1 +1 +−2 +6.39 +−1 +−1 +1 +1 +6.70 +A +1 +1 +−2 +1 +1 +1 +−3 5.15 +5.12 +6.35 +1 +−1 +5.15 +6.51 + +5.10 +5.11 +6.19 +6.30 6.44 + +6.29 +6.32 6.34 +6.45 +6.26 +6.19 +−1 +−1 +1 +1 +5.4 +S +A +A +1 +−1 +1 +−1 +6.57 +6.57 +A +6.20 +6.33 6.56 +A +1 +1 +−3 + +6.56 +1 +6.57 + +Figure 26: 5-soft graphs. +6.9 +6-soft graphs +Fig. 27 shows 6s graphs with at most 6 vertices. Notice how these graphs stem +from graphs 6.9, 6.37, 6.2 (two configurations) and 6.16. 
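The hand classifications of Sections 6.4 to 6.9 lend themselves to a mechanical cross-check. The following sketch (Python/numpy, added for illustration; the function names are mine) builds a Laplacian from an edge list and reports, for each eigenvalue, the vertices that are soft for it: every vertex is soft when the eigenvalue is multiple, and for a simple eigenvalue a vertex is soft exactly when the eigenvector vanishes there. Run on the 4-cycle, graph 5.7 of the classification, it recovers the fact that 5.7 is 2-soft, with for instance (1, 0, -1, 0) as an eigenvector for the eigenvalue 2.

```python
import numpy as np

def laplacian(n, edges):
    """Graph Laplacian of the graph on vertices 1..n with the given edges."""
    L = np.zeros((n, n))
    for i, j in edges:
        L[i-1, j-1] -= 1
        L[j-1, i-1] -= 1
        L[i-1, i-1] += 1
        L[j-1, j-1] += 1
    return L

def soft_nodes(L, tol=1e-8):
    """Map each eigenvalue of L to the list of vertices (1-based) soft for it.
    For a multiple eigenvalue every vertex is soft (some combination of its
    eigenvectors vanishes there); for a simple one, only the zero components."""
    vals, vecs = np.linalg.eigh(L)
    result, done = {}, np.zeros(len(vals), dtype=bool)
    for k, lam in enumerate(vals):
        if done[k]:
            continue
        group = np.where(np.abs(vals - lam) < 1e-6)[0]
        done[group] = True
        if len(group) > 1:
            soft = list(range(1, L.shape[0] + 1))
        else:
            soft = [s + 1 for s, x in enumerate(vecs[:, k]) if abs(x) < tol]
        if soft:
            result[round(float(lam), 6)] = soft
    return result

# Graph 5.7 ("12 14 23 34") is the 4-cycle.
C4 = laplacian(4, [(1, 2), (1, 4), (2, 3), (3, 4)])
print(soft_nodes(C4))   # {2.0: [1, 2, 3, 4]}
```

Looping this over the connection lists of Appendix A should reproduce the lambda-soft memberships tabulated in Appendix B.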
+36 + +6.37 + + +1 +−1 +6.9 + + +1 +−1 +1 +1 +−2 +6.16 +6.37 + + +−4 +1 +1 +1 +1 +6.11 + + + + +1 +−1 +6.8 − 6.6 − 6.4 −6.3 − 6.2 +6.6 − 6.5 − 6.4 − 6.3 − 6.2 +Figure 27: 6-soft graphs. +6.10 +x-soft graphs, x non integer +As proven above, the only eigenvalues that are non integer are irrational. For +these, there can be soft nodes. Among the 5 node graphs, we found irrational +eigenvalues for the chain 5 and the cycle 5. In addition, there are the following +nb. in +eigenvalue +eigenvector +classification +5.16 +λ2 = 3 − +√ +2 +(−0.27, −0.65, 0, 0.65, 0.27)T +5.16 +λ4 = 3 + +√ +2 +(0.65, −0.27, 0, 0.27, −0.65)T +5.21 +λ4 = (7 + +√ +5)/2 +(−0.6, 0.6, 0.37, 0, −0.37)T +5.21 +λ5 = (7 − +√ +5)/2 +(−0.37, 0.37, −0.6, 0, 0.6)T +5.24 +λ2 = (5 − +√ +13)/2 +(−0.67, −0.2, 0.2, 0.67, 0)T +5.24 +λ5 = (5 + +√ +13)/2 +(−0.2, 0.67, −0.67, 0.2, 0)T +5.30 (chain 5) +λ4 = (3 + +√ +5)/2 +(−0.6, 0.6, 0.37, 0, −0.37)T +5.30 (chain 5) +λ5 = (3 − +√ +5)/2 +(−0.37, 0.37, −0.6, 0, 0.6)T +Table 2: Non trivial graphs with soft nodes and non integer eigenvalues. +Remarks +The graph 5.16 is 3 soft. The graphs 5.21 and 5.24 are not part of an integer +soft class. They are +37 + +1 +2 +3 +4 +5 +5.21 +1 +2 +3 +4 +5 +5.24 +1 +2 +3 +4 +5 +5.16 +Figure 28: +The graphs 5.16, 5.21 and 5.24 with their soft node +• Graph 5.16 is a chain 4 with a soft node added. +• Graph 5.21 is obtained from chain 5 (graph 5.30) by inserting a soft node. +6.11 +Minimal λ soft graphs +We computed the minimal λ soft graphs for λ = 1, . . . , 6. These are presented +in Fig. 29. +5.15 +−1 +1 +−3 +1 +1 +1 +5.15 +6.9 +−1 +−1 +1 +1 +1 +−1 +5.5 +Ch 3 +1 +−1 +5.3 +1 +−1 +1s +2s + + + + +−1 +1 + +5.7 +5.2 +5.5 +6.37 +5s +−1 +1 +−1 +1 +1 +−1 +−1 +1 +5.13 +6.70 + +6s +6.37 +6.37 +6.16 +−4 +1 +1 +1 +1 +−1 +1 +1 +1 +−2 +−3 +1 +1 +1 +6.6 +1 +−1 +1 +−1 +3s +4s +Figure 29: +The minimal λ soft graphs for λ = 1, 2, 3, 4, 5 and 6. +Note that there is a unique minimal λ-soft graph for λ = 1 and 2. There +38 + +are two minimal 3-soft graphs and 4-soft graphs. There are four minimal 5-soft +graphs. The first two are generated by respectively inserting a soft node and +adding a soft node to the minimal 4-soft graph. The third and fourth ones are +obtained respectively by adding three soft nodes to the 2 clique and adding a +soft node to the 4 star. +Three systematic ways to generate minimal λ+1-soft graphs are (i) inserting +a zero to a λ-soft graph, +(ii) adding a zero to aλ-soft graph and +(iii) adding a matching to a λ − 1-soft graph. One can therefore generate sys- +tematically minimal 7-soft, 8-soft.. graphs. +7 +Conclusion +We reviewed families of graphs whose spectrum is known and presented trans- +formations that preserve an eigenvalue. The link, articulation and soldering +were contained in Merris [10] and we found two new transformations : the reg- +ular expansion and the replacement of a coupling by a square. We also showed +transformations that shift an eigenvalue : insertion of a soft node (+1), addi- +tion of a soft node (+1), insertion of a matching (+2). The first is new and the +second and third were found by Das [11] and Merris [10] respectively. +From this appears a landscape of graphs formed by families of λ-graphs +connected by these transformations. These structures remain to be understood. +We presented the connections between small graphs with up to six vertices. Is it +possible to obtain all the λ graphs using a series of elementary transformations? +Or just part of these ? 
+We answered partially the question: can one predict eigenvalues/eigenvectors +from the geometry of a graph ? by examining the situation of a a λ subgraph +G of a λ graph G”. We showed that if the remainder graph G′ is λ, it is an +articulation or a link of G. If not and if G and G′ share an eigenvector, the two +may be related by adding one or several soft nodes to G′. +A number of the graphs we studied have irrational eigenvalues and we can +define λ graphs for these as well because the transformations apply. However +we did not find any connection between λ graphs and µ graphs if λ is an integer +and µ an irrational. +References +[1] D. Cvetkovic, P. Rowlinson and S. Simic, ”An Introduction to the Theory +of Graph Spectra”, London Mathematical Society Student Texts (No. 75), +(2001). +39 + +[2] C. Maas, Transportation in graphs and the admittance spectrum, Discrete +Applied Mathematics 16 (1987) 31-49 +[3] J.-G. Caputo, A. Knippel and E. Simo, ”Oscillations of simple networks: +the role of soft nodes”, J. Phys. A: Math. Theor. 46, 035100 (2013) +[4] J.-G. Caputo, A. Knippel, and N. Retiere, Spectral solution of load flow +equations, Eng. Res. Express, 025007, (2019). +[5] F. Bustamante-Casta˜neda, J.-G. Caputo, G. Cruz-Pacheco, A. Knippel and +F. Mouatamide, ”Epidemic model on a network: analysis and applications +to COVID-19”, Physica A, 564, 125520, (2021). +[6] U. Von Luxburg, A tutorial on spectral clustering Stat Comput, 17: +395–416, (2007). +[7] Mark Kac, Can One Hear the Shape of a Drum?, The American Mathe- +matical Monthly, 73, 4P2, 1-23, (1966). +[8] B. Mohar, The Laplacian spectrum of graphs, in: Y. Alavi, G. Chartrand, +O.R. Oellermann, A.J. Schwenk Wiley (Eds.), Graph Theory, Combina- +torics and Applications, vol. 2, 1991, 871–898, (1991). +[9] T. Biyikoglu, J. Leydold and P. F. Stadler ”Laplacian Eigenvectors of +Graphs”, Springer (2007). +[10] R. Merris, ”Laplacian graph eigenvectors”, Linear Algebra and its Appli- +cations, 278, 22l-236, (1998) . +[11] K. Ch. Das, The Laplacian Spectrum of a Graph, Computers and Mathe- +matics with Applications, 48, 715-724, (2004). +[12] J. G. Caputo, I. Khames, A. Knippel, ”On graph Laplacians eigenvectors +with components in 1,-1,0”, Discrete Applied Mathematics, 269, 120-129, +(2019). +[13] https://en.wikipedia.org/wiki/Rational root theorem +[14] R. Grone and R. Merris, The Laplacian spectrum of a graph, Siam J. +Discrete Math. (C) 1994 Vol. 7, No. 2, pp. 221-229, May 1994 +[15] Thomas Edwards, ”The Discrete Laplacian of a Rectangular Grid”, web +document, (2013). +8 +Appendix A: Graph classification +The following tables indicate the graph classification we used. Each line in the +”connections” column is the connection list of the corresponding graph. 
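To make the tables below easy to use programmatically, here is a small sketch (Python/numpy, added for illustration) of how a connection list can be turned into a Laplacian: each token such as 12 or 45 is read as the edge between the two vertices named by its digits, which is sufficient here since all listed graphs have at most nine vertices. The helper name and this reading of the token format are mine; the format itself is apparent from the tables (for instance 12 13 23 is the triangle).

```python
import numpy as np

def laplacian_from_connections(n, connections):
    """Laplacian of the n-vertex graph whose edges are listed as digit
    pairs, e.g. "12 23" for the 3-chain or "12 13 23" for the triangle."""
    L = np.zeros((n, n))
    for token in connections.split():
        i, j = int(token[0]) - 1, int(token[1]) - 1
        L[i, j] -= 1
        L[j, i] -= 1
        L[i, i] += 1
        L[j, j] += 1
    return L

# Graph 3 of the classification ("12 23") is the 3-chain; its spectrum
# {0, 1, 3} is consistent with its listing as a 1-soft graph, with
# eigenvector (-1, 0, 1), in Appendix B.
print(np.round(np.linalg.eigvalsh(laplacian_from_connections(3, "12 23")), 6))
# [0. 1. 3.]
```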
+40 + +classification +nodes +links +connections +[1] +1 +2 +1 +12 +2 +3 +3 +12 13 23 +3 +3 +2 +12 23 +4 +4 +6 +12 13 14 23 24 34 +5 +4 +5 +12 13 14 23 34 +6 +4 +4 +12 13 23 34 +7 +4 +4 +12 14 23 34 +8 +4 +3 +12 23 24 +9 +4 +3 +12 23 34 +10 +5 +10 +12 13 14 15 23 24 25 34 35 45 +11 +5 +9 +12 13 14 15 23 24 34 35 45 +12 +5 +8 +12 14 15 23 42 25 34 45 +13 +5 +8 +12 13 15 23 24 34 35 45 +14 +5 +7 +12 13 14 23 24 34 35 +15 +5 +7 +13 15 23 25 34 35 45 +16 +5 +7 +12 13 15 23 34 35 45 +17 +5 +7 +12 14 15 23 25 34 45 +18 +5 +6 +12 13 14 23 34 35 +19 +5 +6 +12 14 23 24 34 35 +20 +5 +6 +12 13 23 34 35 45 +21 +5 +6 +12 15 23 34 35 45 +22 +5 +6 +13 15 23 25 34 45 +23 +5 +5 +12 13 23 34 35 +24 +5 +5 +12 23 25 35 34 +25 +5 +5 +12 13 23 34 45 +26 +5 +5 +12 14 23 34 35 +27 +5 +5 +12 15 23 34 45 +28 +5 +4 +13 23 34 35 +29 +5 +4 +12 13 14 45 +30 +5 +4 +12 23 34 45 +Table 3: Graphs of less than 5 nodes labelled 1 to 30 in classification [1]. +41 + +classification +nodes +links +connections +[1] +1 +6 +15 +12 13 14 15 16 23 24 25 26 34 35 36 45 46 56 +2 +6 +14 +12 13 15 16 23 24 25 26 34 35 36 45 46 56 +3 +6 +13 +12 14 15 16 23 24 25 26 34 35 45 46 56 +4 +6 +13 +12 13 15 16 23 24 25 26 34 35 45 46 56 +5 +6 +12 +12 13 15 16 23 25 26 34 35 36 45 56 +6 +6 +12 +12 13 14 15 16 23 25 34 35 36 45 56 +7 +6 +12 +12 13 15 16 23 24 25 26 34 35 45 56 +8 +6 +12 +12 13 15 16 23 24 34 35 36 45 46 56 +9 +6 +12 +12 13 15 16 23 24 26 34 35 45 46 56 +10 +6 +11 +12 13 14 15 23 24 25 34 35 45 56 +11 +6 +11 +12 14 16 23 24 26 34 36 45 46 56 +12 +6 +11 +12 13 15 16 23 25 26 34 35 45 56 +13 +6 +11 +12 15 16 23 24 25 26 34 35 45 56 +14 +6 +11 +12 13 15 16 23 25 26 34 36 45 56 +15 +6 +11 +12 14 15 16 23 24 34 35 45 46 56 +16 +6 +11 +12 14 15 16 23 25 34 35 36 45 56 +17 +6 +11 +12 14 15 16 23 26 34 35 45 46 56 +18 +6 +11 +12 15 16 23 24 26 34 35 45 46 56 +19 +6 +10 +12 13 15 23 24 25 34 35 45 56 +20 +6 +10 +12 13 14 15 23 24 34 35 45 56 +21 +6 +10 +12 15 16 23 24 25 26 35 45 56 +22 +6 +10 +12 13 15 16 23 34 35 36 45 56 +23 +6 +10 +12 16 23 25 26 34 35 36 45 56 +24 +6 +10 +12 15 16 23 25 26 34 35 45 56 +25 +6 +10 +12 13 14 15 16 23 34 36 45 56 +26 +6 +10 +12 14 16 23 34 35 36 45 46 56 +27 +6 +10 +12 15 16 23 26 34 35 36 45 56 +28 +6 +10 +12 14 16 23 24 34 35 45 46 56 +29 +6 +10 +12 14 16 23 24 26 34 36 45 56 +30 +6 +10 +12 15 61 23 24 25 34 36 45 56 +Table 4: 6 node graphs labelled 1 to 30 in classification [1]. 
+42 + +classification +nodes +links +connections +[1] +31 +6 +10 +12 15 16 23 24 26 34 35 45 56 +32 +6 +10 +12 14 16 23 25 26 34 36 45 56 +33 +6 +9 +12 15 23 24 25 34 35 45 56 +34 +6 +9 +12 14 15 23 24 25 34 45 56 +35 +6 +9 +12 13 14 15 23 24 34 45 56 +36 +6 +9 +12 13 14 23 24 34 45 46 56 +37 +6 +9 +12 13 14 15 16 24 34 45 46 +38 +6 +9 +12 14 15 23 25 34 35 45 56 +39 +6 +9 +12 13 15 23 24 25 34 45 56 +40 +6 +9 +12 13 16 23 34 35 36 46 56 +41 +6 +9 +12 16 23 34 35 36 45 46 56 +42 +6 +9 +12 16 23 24 26 34 45 46 56 +43 +6 +9 +12 13 16 23 34 35 36 45 56 +44 +6 +9 +12 13 16 23 34 36 45 46 56 +45 +6 +9 +12 16 23 25 34 35 36 45 56 +46 +6 +9 +12 13 15 16 23 34 36 45 56 +47 +6 +9 +12 13 16 23 25 26 34 45 56 +48 +6 +9 +12 15 16 23 26 34 35 45 56 +49 +6 +9 +12 15 16 23 24 26 35 45 56 +50 +6 +9 +12 14 15 16 23 34 36 45 56 +51 +6 +9 +12 15 24 16 23 34 36 45 56 +52 +6 +9 +12 14 16 23 25 34 36 45 56 +53 +6 +8 +12 13 14 23 24 34 45 46 +54 +6 +8 +12 13 14 23 24 25 34 36 +55 +6 +8 +12 13 14 23 24 34 45 56 +56 +6 +8 +13 15 23 25 34 35 45 56 +57 +6 +8 +12 14 23 24 25 34 45 56 +58 +6 +8 +12 15 23 25 34 35 45 56 +59 +6 +8 +12 13 15 23 34 35 45 56 +60 +6 +8 +12 14 15 23 24 34 45 56 +Table 5: 6 node graphs labelled 31 to 60 in classification [1]. +43 + +classification +nodes +links +connections +[1] +61 +6 +8 +12 13 14 15 16 23 45 46 +62 +6 +8 +12 14 23 42 34 35 36 56 +63 +6 +8 +12 14 15 23 25 34 45 56 +64 +6 +8 +12 13 15 23 25 34 45 56 +65 +6 +8 +12 13 15 23 24 34 45 56 +66 +6 +8 +12 16 24 34 36 45 56 46 +67 +6 +8 +12 13 16 23 34 36 45 56 +68 +6 +8 +12 13 16 23 34 35 45 56 +69 +6 +8 +12 15 16 23 26 34 45 56 +70 +6 +8 +12 13 16 23 34 45 46 56 +71 +6 +8 +12 13 16 23 34 35 46 56 +72 +6 +8 +12 15 16 23 34 36 45 56 +73 +6 +8 +12 15 23 24 26 35 45 56 +74 +6 +8 +12 14 16 23 25 34 45 56 +75 +6 +7 +12 13 14 23 34 35 36 +76 +6 +7 +12 23 24 25 34 45 46 +77 +6 +7 +12 14 23 24 25 34 36 +78 +6 +7 +12 14 23 24 34 35 36 +79 +6 +7 +12 13 23 34 36 35 45 +80 +6 +7 +12 23 25 34 35 45 46 +81 +6 +7 +12 13 14 23 34 35 56 +82 +6 +7 +12 14 23 24 34 35 56 +83 +6 +7 +12 13 23 34 35 45 46 +84 +6 +7 +12 13 23 34 45 46 56 +85 +6 +7 +12 15 23 24 34 45 46 +86 +6 +7 +12 13 15 23 34 45 46 +87 +6 +7 +12 13 15 23 34 45 46 +87B +6 +7 +12 13 15 24 34 45 56 +88 +6 +7 +12 14 23 34 35 36 56 +89 +6 +7 +12 15 16 23 34 45 56 +90 +6 +7 +13 15 23 25 34 45 56 +Table 6: 6 node graphs labelled 61 to 90 in classification [1]. Note that 87B is absent +from [1]. +44 + +classification +nodes +links +connections +[1] +91 +6 +7 +12 14 23 25 34 45 56 +92 +6 +7 +12 16 23 34 36 45 56 +93 +6 +7 +12 15 23 34 36 45 56 +94 +6 +6 +12 13 23 34 35 36 +95 +6 +6 +13 23 34 35 45 56 +96 +6 +6 +12 23 25 34 35 56 +97 +6 +6 +12 13 23 34 35 56 +98 +6 +6 +12 23 35 34 45 56 +99 +6 +6 +12 23 24 45 46 56 +100 +6 +6 +12 23 34 45 46 56 +101 +6 +6 +12 14 23 34 35 36 +102 +6 +6 +12 14 23 25 34 36 +103 +6 +6 +12 23 42 35 45 56 +103 +6 +6 +12 14 23 34 35 56 +105 +6 +6 +12 15 23 34 45 46 +106 +6 +6 +12 16 23 34 45 56 +107 +6 +5 +16 26 36 46 56 +108 +6 +5 +14 24 34 45 56 +109 +6 +5 +13 23 34 45 46 +110 +6 +5 +12 23 34 36 45 +111 +6 +5 +12 23 34 45 46 +112 +6 +5 +12 23 34 45 56 +Table 7: 6 node graphs labelled 91 to 112 in classification [1]. +9 +Appendix B: sets 1s, 2s, 3s, 4s and 5s +We give here the tables for the sets 1s, 2s, 3s, 4s and 5s for 5 node graph and 6 +node graphs. The numbering of the graphs follow the ones given by Cvetkovic +[1] for 5 and less nodes and 6 nodes graphs respectively. 
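Each row of the tables that follow can be checked directly against the classification of Appendix A. As an example, the sketch below (Python/numpy, added for illustration) verifies that graph 5.28, the 4-star with connection list 13 23 34 35, admits the tabulated eigenvector (1, 1, 0, -1, -1) for the eigenvalue 1, with vertex 3 as its soft node, and then illustrates the link transformation: joining the two vertices that carry equal components (the edge 1 2) gives the connection list 12 13 23 34 35 of graph 5.23, and the same eigenpair survives.

```python
import numpy as np

def laplacian(n, edges):
    """Graph Laplacian for an explicit edge list on vertices 1..n."""
    L = np.zeros((n, n))
    for i, j in edges:
        L[i-1, j-1] -= 1
        L[j-1, i-1] -= 1
        L[i-1, i-1] += 1
        L[j-1, j-1] += 1
    return L

# Graph 5.28 ("13 23 34 35"): the 4-star with centre at vertex 3.
edges_528 = [(1, 3), (2, 3), (3, 4), (3, 5)]
x = np.array([1, 1, 0, -1, -1], dtype=float)   # tabulated eigenvector, soft node at 3
print(np.allclose(laplacian(5, edges_528) @ x, 1 * x))             # True

# Link: vertices 1 and 2 carry equal components, so adding the edge 1-2
# (connection list "12 13 23 34 35", i.e. graph 5.23) keeps the eigenpair.
print(np.allclose(laplacian(5, edges_528 + [(1, 2)]) @ x, 1 * x))  # True
```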
+45 + +9.1 +1s +nodes +links +classification [1] +eigenvector +connection +3 +2 +3 +(−1, 0, 1) +4 +3 +8 +(0, 0, −1, 1) +4 +4 +6 +(1, 1, 0, −2) +expansion on 5.3 +5 +4 +28 +(1, 1, 0, −1, −1) +5 +4 +28 +(1, 0, 0, 0, −1) +articulation on 5.3 +5 +4 +28 +(1, 1, 1, 0, −3) +star 4 +5 +4 +29 +(0, 1, −1, 0, 0) +articulation on 5.3 +5 +5 +23 +(0, 0, 0, 1, −1) +articulation on 5.3 +5 +5 +23 +(1, 1, 0, −2, 0) +expansion on 5.3 +5 +6 +18 +(1, 1, 0, 1, −3) +5 +6 +20 +(1, 1, 0, −1, −1) +articulation on 5.28 +5 +7 +14 +(1, 1, 0, 1, −3) +Table 8: Five node graphs with soft nodes and eigenvalue 1. +46 + +nodes +links +classification [1] +eigenvector +connection +6 +11 +10 +(1, 1, 1, 1, 0, −4) +link on 19 +6 +10 +19 +(1, 1, 1, 1, 0, −4) +link on 33 +6 +9 +33 +(1, 1, 1, 1, 0, −4) +link on 38 +6 +9 +36 +(2, 2, 2, 0, −3, −3) +link on 61 +6 +9 +38 +(1, 1, 1, 1, 0, −4) +link 58 +6 +9 +53 +(0, 0, 0, 0, 1, −1) +link 75 +6 +9 +53 +(1, 1, 1, 0, −3, 0) +link 75 +6 +8 +56 +(1, 1, 1, 1, 0, −4) +expansion on 5.3 +6 +8 +58 +(1, 1, 1, 1, 0, −4) +expansion on 5.3 +6 +8 +61 +(2, 2, 2, 0, −3, −3) +link on 94 +6 +7 +75 +(0, 0, 0, 0, 1, −1) +articulation on 5.3 +6 +7 +75 +(1, 1, 0, 1, −3, 0) +link on 101 +6 +7 +78 +(0, 0, 0, 0, 1, −1) +link on 101 +6 +7 +79 +(1, 1, 0, 0, 0, −2) +expansion on 5.3 +6 +7 +79 +(1, 1, 0, −1, −1, 0) +link on 94 +6 +7 +92 +(1, 1, 0, −1, −1, 0) +link on 106 +6 +6 +94 +(1, 1, 0, −2, 0, 0) +link on 107 +6 +6 +94 +(3, 3, 0, −2, −2, −2) +link on 107 +6 +6 +94 +(1, −1, 0, 0, 0, 0) +link on 95 +6 +6 +95 +(1, −1, 0, 0, 0, 0) +articulation on 5.3 +6 +6 +97 +(1, 1, 0, −2, 0, 0) +link on 108 +6 +6 +99 +(1, 0, −1, 0, 0, 0) +articulation on 5.3 +6 +6 +101 +(0, 0, 0, 0, 1, −1) +articulation on 5.3 +6 +6 +106 +(0, 1, 1, 0, −1, −1) +link between two 5.3 +6 +5 +107 +(−1, 1, 0, 0, 0, 0) +articulation on 5.3 +6 +5 +107 +(1, 1, −1, −1, 0, 0) +soldering two 5.3 and articulation +6 +5 +107 +(1, 1, 0, −2, 0, 0) +articulation on 5.3 +6 +5 +108 +(−1, 1, 0, 0, 0, 0) +articulation on 5.3 +6 +5 +108 +(−1, 0, 1, 0, 0, 0) +articulation on 5.3 +6 +5 +109 +(−1, 1, 0, 0, 0, 0) +articulation on 5.3 +6 +5 +109 +(0, 0, 0, 0, −1, 1) +articulation on 5.3 +6 +5 +111 +(0, 0, 0, 0, −1, 1) +articulation on 5.3 +Table 9: Six node graphs with soft nodes and eigenvalue 1. +47 + +9.2 +2s +nodes +links +classification [1] +eigenvector +connection +4 +5 +5 +(1, 0, −1, 0) +link on 5.7 +4 +4 +7 +(1, 0, −1, 0) +4 +4 +7 +(0, 1, 0, −1) +5 +8 +12 +(1, 0, −2, 0, 1) +link on 5.17 +5 +7 +15 +(1, 0, −1, 0, 0) +articulation on 5.7 +5 +7 +15 +(1, 01, 0, −2) +link on 5.17 +5 +7 +17 +(1, 0, −2, 0, 1) +5 +6 +18 +(0, 1, 0, −1, 0) +articulation 5.7 +5 +6 +22 +(0, 1, 0, −1, 0) +add a zero to 5.3 and articulation +5 +6 +22 +(1, 0, 0, −1, 0) +add a zero to 5.3 and articulation +5 +5 +26 +(0, 1, 0, −1, 0) +articulation 5.7 +Table 10: Five node graphs with soft nodes and eigenvalue 2. 
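Several connection entries in the tables of this appendix invoke the eigenvalue shifts (addition or insertion of a soft node, insertion of a matching). As one concrete check, the addition of a soft node in the sense of Das [11], that is, joining a new vertex to every vertex of the graph, shifts every nonzero eigenvalue by one while the old eigenvectors extend by a zero on the new vertex. The sketch below (Python/numpy, added for illustration; my reading of the construction) starts from the 3-chain, a 1-soft graph, adds such a vertex, and checks that the extended eigenvector (-1, 0, 1, 0) now affords the eigenvalue 2; the resulting 4-vertex graph is, up to relabelling, graph 5.5, which the eigenvalue-2 table lists with eigenvector (1, 0, -1, 0).

```python
import numpy as np

def laplacian(n, edges):
    """Graph Laplacian for an explicit edge list on vertices 1..n."""
    L = np.zeros((n, n))
    for i, j in edges:
        L[i-1, j-1] -= 1
        L[j-1, i-1] -= 1
        L[i-1, i-1] += 1
        L[j-1, j-1] += 1
    return L

# Chain 3 (graph 5.3, "12 23"): eigenvalue 1 with eigenvector (-1, 0, 1).
chain3 = [(1, 2), (2, 3)]
print(np.round(np.linalg.eigvalsh(laplacian(3, chain3)), 6))   # [0. 1. 3.]

# Join a new vertex 4 to every vertex of the chain (the construction
# attributed to Das [11]): nonzero eigenvalues shift by +1 and the old
# eigenvector extends by a zero on the new, soft, vertex.
L4 = laplacian(4, chain3 + [(1, 4), (2, 4), (3, 4)])
print(np.round(np.linalg.eigvalsh(L4), 6))                     # [0. 2. 4. 4.]
x = np.array([-1, 0, 1, 0], dtype=float)
print(np.allclose(L4 @ x, 2 * x))                              # True
```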
+48 + +nodes +links +classification [1] +eigenvector +connection +6 +12 +5 +(1, 1, 0, −3, 0, 1) +6 +11 +11 +(1, 1, 1, 0, −3, 0) +6 +11 +13 +(1, 0, −1, −1, 0, 1) +6 +11 +14 +(1, 1, 0, −3, 0, 1) +6 +10 +21 +(1, 0, −1, −1, 0, 1) +6 +10 +21 +(0, 0, 1, −1, 0, 0) +6 +10 +29 +(1, 1, 1, 0, −3, 0) +6 +10 +31 +(1, 0, −1, −1, 0, 1) +link on 37 +6 +9 +33 +(−2, 0, 1, 1, 0, 0) +link on 56 +6 +9 +37 +(1, 0, 0, 0, 0, −1) +link on 73 +6 +9 +37 +(0, 0, 0, −1, 0, 1) +link on 73 +6 +9 +37 +(1, 0, 1, −1, 0, −1) +link on 73 +6 +9 +40 +(0, 0, 0, 0, 1, −1) +addition of a 0 to 5.3, articulation +6 +9 +49 +(0, 0, 1, −1, 0, 0) +addition of a 0 to 5.3, articulation +6 +9 +49 +(1, 0, −1, −1, 0, 1) +link on 69 +6 +8 +56 +(−1, 1, 0, 0, 0, 0) +addition of a 0 to 5.3, articulation +6 +8 +56 +(1, 1, 0, −2, 0, 0) +link on 64 +6 +8 +57 +(−1, 0, 1, 0, 0, 0) +link 93 +6 +8 +61 +(0, 0, 0, 0, −1, 1) +addition of a 0 to 5.3, articulation +6 +8 +64 +(1, 1, 0, −2, 0, 0) +expansion of 5.7 +6 +8 +66 +(0, 0, 1, 0, −1, 0) +addition of a 0 to 5.3 +6 +8 +69 +(0, 1, 1, −1, −1, 0) +link 5.7 and 5.3 +6 +8 +71 +(0, 0, 0, 1, −1, 0) +addition of a 0 to 5.3 +6 +8 +73 +(1, 0, 0, 0, 0, −1) +addition of a 0 to 5.3 +6 +8 +73 +(0, 0, 0, −1, 0, 1) +addition of a 0 to 5.3 +6 +8 +73 +(1, 0, 1, −1, 0, −1) +soldering two 5.7 +6 +8 +74 +(0, −1, 0, 1, 0, 0) +link and articulation 5.7 +6 +7 +75 +(0, −1, 0, 1, 0, 0) +link 101 +6 +7 +76 +(0, 0, −1, 0, 1, 0) +link 103 +6 +7 +81 +(0, −1, 0, 1, 0, 0) +link and articulation 5.7 +6 +7 +82 +(1, 0, −1, 0, −1, 1) +link 104 +6 +7 +88 +(0, −1, 0, 1, 0, 0) +articulation on 5.7 +6 +7 +90 +(1, 0, 0, −1, 0, 0) +articulation on 5.7 +6 +7 +90 +(1, −1, 0, 0, 0, 0) +articulation on 5.7 +6 +7 +91 +(1, 0, −1, 0, 0, 0) +addition of a 0 to 5.3 +6 +7 +92 +(−1, 1, 1, 1, −1, −1) +link on 5.1 +6 +7 +93 +(0, 0, 0, 1, 0, −1) +addition of a 0 to 5.3, articulation +6 +7 +93 +(1, −1, −1, 0, 1, 0) +6 +6 +101 +(0, 1, 0, −1, 0, 0) +addition of a 0 to 5.3, articulation +6 +6 +103 +(0, 0, 1, −1, 0, 0) +addition of a 0 to 5.3, articulation +6 +6 +104 +(0, 1, 0, −1, 0, 0) +addition of a 0 to 5.3, articulation +6 +6 +104 +(1, 0, −1, 0, −1, 1) +articulation on 5.7 +Table 11: Six node graphs with soft nodes and eigenvalue 2. +49 + +9.3 +3s +nodes +links +classification [1] +eigenvector +connection +3 +3 +2 +(−1, 1, 0) +4 +4 +6 +(−1, 1, 0, 0) +articulation 5.3 +5 +7 +11 +(0, −1, 0, 0, 1) +articulation 5.3 +5 +6 +13 +(−1, 0, 0, −1, 0) +articulation 5.3 +5 +8 +13 +(0, 1, 0, 0, −1) +articulation 5.3 +5 +7 +16 +(−1, 1, 0, −1, 1) +addition of +zero to chain 4 +5 +7 +17 +(0, 1, 0, −1, 0) +articulation 5.3 +5 +6 +20 +(−1, 1, 0, 0, 0) +articulation 5.3 +5 +6 +20 +(0, 0, 0, −1, 1) +articulation 5.3 +5 +6 +22 +(0, 0, −1, 0, 1) +articulation 5.3 +5 +5 +23 +(−1, 1, 0, 0, 0) +articulation 5.3 +5 +5 +25 +(−1, 1, 0, 0, 0) +articulation 5.3 +Table 12: Five node graphs with soft nodes and eigenvalue 3. 
+50 + +nodes +links +classification [1] +eigenvector +connection +6 +13 +3 +(−1, 0, 2, 0, 0, −1) +link 8 +6 +12 +6 +(0, −1, 0, 1, 0, 0) +link 11 +6 +12 +6 +(0, −1, 0, −1, 0, 2) +link 8 +6 +12 +8 +(0, −2, 0, 0, 1, 1) +link 11 +6 +11 +11 +(1, 0, −1, 0, 0, 0) +link 16 +6 +11 +16 +(0, 1, 0, −1, 0, 0) +link 25 +6 +11 +16 +(0, −1, 0, −1, 0, 2) +link 17 +6 +11 +17 +(0, 2, 0, −1, −1, 0) +link 52 +6 +11 +17 +(1, 0, −2, 0, 0, 1) +link 52 +6 +10 +19 +(1, 0, 0, −1, 0, 0) +link 39 +6 +10 +25 +(0, 0, 0, −1, 0, 1) +6 +10 +29 +(1, 0, −1, 0, 0, 0) +6 +10 +32 +(0, 1, 0, −1, 0, 0) +link 52 +6 +10 +32 +(1, 0, −1, 0, 0, 0) +link 52 +6 +9 +36 +(0, 0, 0, 0, 1, −1) +articulation 5.2 +6 +9 +38 +(1, 0, −1, 0, 0, 0) +articulation 5.13 +6 +9 +38 +(0, 1, 0, −1, 0, 0) +link 63 +6 +9 +39 +(1, 0, 0, −1, 0, 0) +link 63 +6 +9 +51 +(1, 1, 0, −1, −1, 0) +link 106 +6 +9 +51 +(0, 0, 1, −1, 1, −1) +link 106 +6 +9 +52 +(0, 1, 0, 1, 0, −2) +6 +9 +52 +(1, 0, 1, 0, −2, 0) +6 +9 +52 +(1, 0, −1, 0, 0, 0) +link 106 +6 +9 +52 +(0, 1, 0, −1, 0, 0) +link 106 +6 +8 +58 +(−1, 1, 1, −1, 0, 0) +link 79 +6 +8 +61 +(0, −1, 1, 0, 0, 0) +articulation 5.2 +6 +8 +62 +(0, 0, 0, 0, −1, 1) +articulation 5.2 +6 +8 +63 +(0, 1, 0, −1, 0, 0) +link 91 +6 +8 +70 +(0, −1, 1, 1, −1, 0) +link 106 +6 +8 +70 +(−1, 0, 1, 1, 0, −1) +link 106 +6 +8 +74 +(−1, 0, 1, −1, 0, 1) +link 106 +6 +8 +74 +(0, 0, 0, −1, 0, 1) +link 106 +6 +8 +74 +(−1, 0, 1, 0, 0, 0) +link 106 +6 +7 +79 +(−1, 1, 0, 0, 0, 0) +articulation 5.2 +6 +7 +79 +(0, 0, 0, −1, 1, 0) +articulation 5.2 +6 +7 +83 +(−1, 1, 0, 0, 0, 0) +articulation 5.2 +6 +7 +84 +(−1, 0, 1, 0, 0, 0) +articulation 5.2 +6 +7 +84 +(0, 0, 0, −1, 0, 1) +articulation 5.2 +6 +7 +88 +(0, 0, 0, 0, −1, 1) +articulation 5.2 +6 +7 +91 +(0, −1, 0, 1, 0, 0) +6 +7 +92 +(1, −1, 0, 1, −1, 0) +link 106 +6 +7 +92 +(0, 1, −1, 0, 1, −1) +link 106 +6 +6 +94 +(0, 0, 0, 0, 1, −1) +articulation 5.2 +6 +6 +97 +(0, 0, 0, 0, 1, −1) +articulation 5.2 +6 +6 +99 +(1, −2, 1, 2, −2, 0) +soldering P3 and C3 +6 +6 +99 +(0, 0, 0, 0, 1, −1) +articulation 5.2 +6 +6 +100 +(1, −2, 1, 1, −1, 0) +soldering P3 and C3 +6 +6 +100 +(0, 0, 0, 0, 1, −1) +articulation 5.2 +6 +6 +106 +(−1, 0, 1, −1, 0, 1) +cycle 6 +6 +6 +106 +(−1, 1, 0, −1, 1, 0) +cycle 6 +Table 13: Six node graphs with soft nodes and eigenvalue 3. +51 + +9.4 +4s +nodes +links +classification [1] +eigenvector +connection +4 +5 +5 +(1, −2, 1, 0) +5 +8 +12 +(1, 0, 0, 0, −1) +link on 17 +5 +7 +14 +(0, 1, 0, −1, 0) +5 +7 +14 +(−1, 0, 0, 1, 0) +link on 19 +5 +7 +17 +(−1, 0, 0, 0, 1) +5 +7 +18 +(−2, 1, 0, 1, 0) +articulation on 5 +5 +7 +19 +(0, 1, 0, −1, 0) +Table 14: Five node graphs with soft nodes and eigenvalue 4. 
+52 + +nodes +links +classification [1] +eigenvector +connection +6 +10 +9 +(0, 0, −1, 1, 0, 0) +6 +10 +9 +(0, 0, −1, 1, 0, 0) +6 +10 +9 +(0, 0, −1, 1, 0, 0) +6 +10 +13 +(0, 0, −1, 1, 0, 0) +6 +10 +13 +(−1, 0, 0, 0, 0, 1) +6 +10 +14 +(0, 0, −1, 0, 1, 0) +6 +10 +16 +(−1, 0, 1, 0, 0, 0) +6 +10 +18 +(−1, 0, −1, 1, 0, 1) +link 6.31 +6 +10 +18 +(1, 0, 0, 0, 0, −1) +link 6.31 +6 +10 +21 +(1, 0, 0, 0, 0, −1) +link 6.31 +6 +10 +24 +(1, 0, 0, 0, 0, −1) +link 6.31 +6 +10 +29 +(0, 0, 0, 1, 0, −1) +link 6.41 +6 +9 +31 +(−1, 0, −1, 1, 0, 1) +link 6.31 +6 +9 +31 +(0, −0.6586, −0.2574, 0.2574, 0.6586, 0) +6 +9 +31 +(1, 0, 0, 0, 0, −1) +6 +9 +33 +(0, 0, −1, 1, 0, 0) +6 +9 +35 +(0, −1, 1, 0, 0, 0) +6 +9 +36 +(0, −1, 1, 0, 0, 0) +link 6.53 +6 +9 +36 +(0, 1, −1, 0, 0, 0) +link 6.53 +6 +9 +41 +(0, 0, 0, 1, 0, −1) +link 6.48 +6 +9 +48 +(1, −1, 1, 0, −1, 0) +link 6.93 +6 +9 +48 +(0, 0, 0, 1, 0, −1) +link 6.49 +6 +9 +49 +(0, −1, 0, 0, 1, 0) +link 6.78 +6 +9 +49 +(−1, 0, 0, 0, 0, −1) +link 6.78 +6 +8 +53 +(−1, 1, 0, 0, 0, 0) +link 6.78 +6 +8 +53 +(0, 1, −1, 0, 0, 0) +link 6.78 +6 +8 +53 +(0, −1, 1, 0, 0, 0) +articulation 5.5 +6 +8 +55 +(−1, 0, 1, 0, 0, 0) +articulation 5.5 +6 +8 +55 +(0, −1, 1, 0, 0, 0) +articulation 5.5 +6 +8 +61 +(0, 0, 0 − 2, 1, 1) +6 +8 +62 +(−1, 1, 0, 0, 0, 0) +link 6.78 +6 +8 +64 +(−1, 1, 0, 0, 0, 0) +articulation 5.17 +6 +8 +65 +(0, −1, 1, 0, 0, 0) +6 +8 +69 +1 arbitrary zero +link 6.93 +6 +8 +71 +(1, −1, 1, 0, −1, 0) +link 6.93 +6 +8 +73 +(0, 1, 0, 0, −1, 0) +soldering 5.7 +6 +7 +75 +(−2, 1, 0, 1, 0, 0) +articulation 5.18 +6 +7 +78 +(0, −1, 0, 1, 0, 0) +articulation 5.5 +6 +7 +80 +(0, 0, −1, 0, 1, 0) +articulation 5.5 +6 +7 +81 +(−2, 1, 0, 1, 0, 0) +articulation 5.18 +6 +7 +82 +(0, −1, 0, 1, 0, 0) +articulation 5.19 +6 +7 +93 +(1, −1, 1, 0, −1, 0) +Table 15: Six node graphs with soft nodes and eigenvalue 4. +53 + +9.5 +5s +nodes +links +classification [1] +eigenvector +connection +5 +10 +10 +(1, −1, 0, 0, 0) +5 +10 +10 +(0, 1, −1, 0, 0) +5 +10 +10 +(0, 0, 1, −1, 0) +5 +10 +10 +(0, 0, 0, 1, −1) +link on 5.11 +5 +9 +11 +(1, 0, −1, 0, 0) +link on 5.12 +5 +9 +11 +(0, 0, 1, −1, 0) +5 +8 +12 +(0, 1, 0, −1, 0) +link 5.15 +5 +8 +13 +(1, 0, −2, 1, 0) +add 2 soft nodes to 5.3 +5 +7 +15 +(1, 1, −3, 1, 0) +add soft node to 4 star +5 +7 +15 +(0, 0, −1, 1, 0) +add 3 soft nodes to 5.1 +Table 16: Five node graphs with soft nodes and eigenvalue 5. 
+54 + +nodes +links +classification [1] +eigenvector +connection +6 +13 +3 +(1, 0, 0, 0, 0, −1) +6 +12 +5 +(−1, 1, 0, 0, 0, 0) +link 6.12 +6 +12 +5 +(1, 1, 0, 0, 0, −2) +link 6.14 +6 +12 +8 +(0, 0, 0, 0, 1, −1) +link 6.12 +6 +12 +8 +(1, 0, 0, −1, 0, 0) +6 +11 +10 +(1, −1, 0, 0, 0, 0) +link 6.19 +6 +11 +10 +(−1, 1, −1, 1, 0, 0) +link 6.19 +6 +11 +10 +(1, −1, −1, 1, 0, 0) +6 +11 +11 +(1, 1, 0, 0, 0, −2) +link 6.14 +6 +11 +12 +(1, −1, 0, 0, 0, 0) +link 6.14 +6 +11 +14 +(1, 1, 0, 0, 0, −2) +link 6.29 +6 +11 +14 +(1, −1, 0, 0, 0, 0) +6 +11 +17 +(−1, 0, 0, 1, −1, 1) +inserting a matching between two 5.2 +6 +11 +17 +(0, 0, 0, 1, −1, 0) +add 3 soft nodes to 5.1 +6 +10 +19 +(−1, 1, 1, −1, 0, 0) +link on 6.30 +6 +10 +19 +(0, 1, −1, 0, 0, 0) +link on 6.19 +6 +10 +20 +(1, 0, −2, 1, 0, 0) +link on 6.33 +6 +10 +20 +(0, 0, −1, 1, 0, 0) +link on 6.35 +6 +10 +23 +(0, 1, 0, 0, −1, 0) +link on 6.32 +6 +10 +23 +(−1, 0, −1, 1, 0, 1) +link on 6.30 +6 +10 +26 +(0, 0, 0, 1, 0, −1) +link on 6.32 +6 +10 +29 +(1, −2, 1, 0, 0, 0) +link on 6.32 +6 +10 +30 +(1, −1, 0, 1, −1, 0) +link on 6.70 +6 +10 +32 +(0, 1, 0, 0, 0, −1) +link on 6.32 +6 +9 +33 +(1, −3, 1, 1, 0, 0) +link on 6.56 +6 +9 +34 +(0, 1, 0, −1, 0, 0) +link on 6.45 +6 +9 +35 +(1, 0, 0, −1, 0, 0) +link on 6.45 +6 +9 +38 +(1, −1, 1, −1, 0, 0) +articulation on 5.13 +6 +9 +39 +(1, −2, 0, 1, 0, 0) +articulation on 5.13 +6 +9 +44 +(−1, 0, 1, −1, 0, 1) +link on 6.70 +6 +9 +45 +(0, 0, 1, −1, 0, 0) +link on 6.57 +6 +9 +51 +(0, 0, 1, −1, 1, −1) +link on 6.70 +6 +8 +56 +(1, 1, −3, 1, 0, 0) +articulation on 5.15 +6 +8 +57 +(0, −1, 0, 1, 0, 0) +articulation on 5.15 +6 +8 +70 +(−1, 0, 1, −1, 0, 1) +insert matching on cycle 3 +Table 17: Six node graphs with soft nodes and eigenvalue 5. +55 + +9.6 +6s +nodes +links +classification [1] +eigenvector +connection +6 +13 +1 +(1, −1, 0, 0, 0, 0) +6 +13 +1 +(1, 0, −1, 0, 0, 0) +6 +13 +1 +(1, 0, 0, −1, 0, 0) +6 +13 +1 +(1, 1, −2, 0, 0, 0) +link 6.3 +6 +13 +2 +(1, −1, 0, 0, 0, 0) +6 +13 +2 +(1, 0, −1, 0, 0, 0) +6 +13 +2 +(1, 0, 0, −1, 0, 0) +6 +13 +3 +(1, −2, 0, 1, 0, 0) +link 6.4 +6 +12 +3 +(0, 1, 0, 0, −1, 0) +link 6.4 +6 +12 +4 +(1, −2, 0, 1, 0, 0) +6 +12 +4 +(0, 1, 0, 0, −1, 0) +link 6.6 +6 +12 +5 +(0, 0, 1, 0, −1, 0) +link 6.7 +6 +12 +6 +(1, 1, 0, 0, 0, −2) +link 6.16 +6 +12 +6 +(1, 0, −1, 0, 0, 0) +link 6.7 +6 +12 +7 +(0, 1, 0, 0, −1, 0) +add four soft nodes to 5.1 +6 +12 +8 +1 arbitrarily placed 0 +6 +11 +9 +(1, 1, 0, −1, 1, 0) +add two soft nodes to 5.7 +6 +11 +11 +(1, 1, 1, −4, 1, 0) +add a soft node to 5.28 +6 +11 +13 +1 arbitrarily placed 0 +6 +11 +16 +(1, 0, 1, 0, −2, 0) +add 3 soft nodes to 5.2 +6 +9 +37 +(1, 0, 0, −1, 0, 0) +add 4 soft node to 5.1 +Table 18: Six node graphs with soft nodes and eigenvalue 6. +56 + diff --git a/m9E_T4oBgHgl3EQf7Ry5/content/tmp_files/load_file.txt b/m9E_T4oBgHgl3EQf7Ry5/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..bc69286f36b1d97b06f1e8f35fc885fb659cc274 --- /dev/null +++ b/m9E_T4oBgHgl3EQf7Ry5/content/tmp_files/load_file.txt @@ -0,0 +1,2124 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf,len=2123 +page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='08369v1 [math.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='SP] 20 Jan 2023 Eigenvectors of graph Laplacians: a landscape Jean-Guy CAPUTO and Arnaud KNIPPEL January 23, 2023 Laboratoire de Math´ematiques, INSA de Rouen Normandie, Normandie Universit´e 76801 Saint-Etienne du Rouvray, France E-mail: caputo@insa-rouen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='fr, arnaud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='knippel@insa-rouen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='fr Abstract We review the properties of eigenvectors for the graph Laplacian ma- trix, aiming at predicting a specific eigenvalue/vector from the geometry of the graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' After considering classical graphs for which the spectrum is known, we focus on eigenvectors that have zero components and ex- tend the pioneering results of Merris (1998) on graph transformations that preserve a given eigenvalue λ or shift it in a simple way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' These transformations enable us to obtain eigenvalues/vectors combinatorially instead of numerically;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' in particular we show that graphs having eigen- values λ = 1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' , 6 up to six vertices can be obtained from a short list of graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' For the converse problem of a λ subgraph G of a λ graph G”, we prove results and conjecture that G and G” are connected by two of the simple transformations described above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1 Introduction The graph Laplacian is an important operator for both theoretical reasons and applications [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' As its continuous counterpart, it arises naturally from conser- vation laws and has many applications in physics and engineering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The graph Laplacian has real eigenvalues and eigenvectors can be chosen orthogonal.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' This gives rise to a Fourier like description of evolution problems on graphs;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' an ex- ample is the graph wave equation, a natural model for weak miscible flows on a network, see the articles [2], [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' This simple formalism proved very useful for modeling the electrical grid [4] or describing an epidemic on a geographi- cal network [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Finally, a different application of graph Laplacians is spectral clustering in data science, see the review [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1 Almost sixty years ago, Mark Kac [7] asked the question : can one Hear the Shape of a Drum?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Otherwise said, does the spectrum of the Laplacian characterize the graph completely ?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We know now that there are isospectral graphs so that there is no unique characterization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' However, one can ask a simpler question: can one predict eigenvalues or eigenvectors from the geometry of the graph?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' From the literature, this seems very difficult, most of the results are inequalities, see for example the beautiful review by Mohar [8] and the extensive monograph [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Many of the results shown by Mohar [8] are inequalities on λ2, the first non zero eigenvalue.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' This eigenvalue is related to the important maximum cut problem in graph theory and also others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Mohar [8] also gives some inequalities on λn, the maximum eigenvalue, in terms of the maximum of the sum of two degrees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Another important inequality concerns the interlacing of the spectra of two graphs with same vertices, differing only by an edge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' However, little is known about the bulk of the spectrum, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' the eigenvalues between λ2 and λn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' A very important step in that direction was Merris’s pioneering article [10] where he introduced ”Laplacian eigenvector principles” that allow to predict how the spectrum of a graph is affected by contracting, adding or deleting edges and/or of coalescing vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Also, Das [11] showed that connecting an additional vertex to all vertices of a graph increases all eigenvalues (except 0) by one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Following these studies, in [12] we characterized graphs which possess eigen- vectors of components ±1 (bivalent) and 0, ±1 (trivalent).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' This is novel because we give exact results, not inequalities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Here, we continue on this direction and focus on eigenvectors that have some zero coordinates, we term these soft nodes;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' such soft nodes are important because there, no action can be effected on the associated mechanical system [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' In this article, we use the important proper- ties of graphs with soft nodes, we call these soft-graphs, to highlight eigenval- ues/eigenvectors that can be obtained combinatorially (instead of numerically).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We first show that eigenvalues of graph Laplacians with weights one are integers or irrationals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Then we present well known classical graphs whose spectrum is known exactly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We describe five graph transformations that preserve a given eigenvalue and two that shift the eigenvalue in a simple way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Among the trans- formations that preserve an eigenvalue, the link was explicitly introduced in the remarkable article by Merris (link principle) [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The articulation and the soldering were contained in the same paper and we choose to present elemen- tary versions of these transformations.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We find two new transformations that preserve an eigenvalue: the regular expansion and the replacement of a cou- pling by a square.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We also present transformations that shift an eigenvalue in a predictable way: insertion of a soft node, addition of a soft node, insertion of a matching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The first is new, the second and third were found by Das [11] and Merris [10] respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' In the last part of the article we enumerate all the small graphs up to six ver- tices that have a given eigenvalue λ and explain the relations between them using 2 the transformations discussed previously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' It is remarkable that these graphs can all be obtained from a short list of graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' However the question is open for bigger graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Using the transformations mentioned above, λ soft graphs can be made arbitrarily large.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The converse problem of a λ subgraph G of a λ graph G” is considered.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We show that the matrix coupling the two Laplacians L(G) and L(G′), where G′ = G” − G, is a graph Laplacian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' If the remainder graph G′ is λ, then it is formed using the articulation or link transformation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' It is possible that the remainder graph G′ is not λ as long as it shares an eigenvector with G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Then the two may be related by adding one or several soft nodes to G′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Finally, an argument shows that if G′ is not λ and does not share an eigenvector with G, the problem has no solution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We finish the article by examining the λ soft graphs for λ = 1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' , 6 and insist on minimal λ soft graphs as generators of these families, using the transformations above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The article is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Section 2 introduces the main definitions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' In section 3 we consider special graphs (chains, cycles, cliques, bipartite graphs) whose Laplacian spectrum is well known.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The graph transformations preserving an eigenvalue are presented in section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Section 5 introduces graph transfor- mations which shift eigenvalues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Finally section 6 introduces λ soft graphs, discusses λ sub-graphs and presents a classification of graphs up to six vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 2 The graph Laplacian : notation, definitions and properties We consider a graph G(V, E) with a vertex set V of cardinality n and edge set E of cardinal m where n, m are finite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The graph is assumed connected with no loops and no multiple edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The graph Laplacian matrix [9] is the (n, n) matrix L(G) or L such that Lij = −1 if edge i j exists, 0 otherwise, Lii = mi, degree of i, (1) where the degree of i is the number of edges connected to vertex i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' The matrix L is symmetric so that it has real eigenvalues and we can always find a basis of orthogonal eigenvectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Specifically we arrange the eigenvalues λi as λ1 = 0 ≤ λ2 ≤ · · · ≤ λn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' (2) We label the associated eigenvectors v1, v2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' , vn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We have the following properties v1 = 1 the vector whose all components are 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Let vi k be the k component of an eigenvector vi, i > 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' An immediate consequence of the vi being orthogonal to v1 is � k vi k = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 3 A number of the results we present hold when Lij ̸= −1 and Lii = � j∼i Lij , this is the generalized Laplacian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We will indicate which as we present them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Regular graphs The graph Laplacian can be written as L = D − A where A is the adjacency matrix and D is the diagonal matrix of the degrees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We recall the definition of a regular graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 (Regular graph) A graph is d-regular if every vertex has the same degree d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' For regular graphs D = dIdn, where Idn is the identity matrix of order n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' For these graphs, all the properties obtained for L in the present article carry over to A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' We will use the following definitions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 (Soft node ) A vertex s of a graph is a soft node for an eigen- value λ of the graph Laplacian if there exists an eigenvector x for this eigenvalue such that xs = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' An important result due to Merris [10] is Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 Let G be a graph with n vertices.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' If 0 ̸= λ < n is an eigenvalue of L(G) then any eigenvector affording λ has component 0 on every vertex of degree n − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 (k-partite graph) A k-partite graph is a graph whose vertices can be partitioned into k different independent sets so that no two vertices within the same set are adjacent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 (cycle) A cycle is a connected graph where all vertices have degree 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 (chain) A chain is a connected graph where two vertices have degree 1 and the other vertices have degree 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 (clique) A clique or complete graph Kn is a simple graph where every two vertices are connected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' In the article we sometimes call configuration a vertex valued graph where the values correspond to an eigenvector of the graph Laplacian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 4 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 Eigenvalues are integers or irrationals We have the following result Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='8 If the eigenvalue λ is an integer, then there exist integer eigen- vectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' To see this consider the linear system (L − λI)X = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' It can be solved using Gauss’s elimination.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' This involves algebraic manipula- tions so that the result X is rational.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' If X is rational, then multiplying by the product of the denominators of the entries, we obtain an eigenvector with integer entries.' 
We now show that the eigenvalues of a graph Laplacian are either integers or irrationals. We have the following rational root lemma on the roots of polynomials with integer coefficients, see for example [13].

Lemma 2.9 (Rational root) Consider the polynomial equation a_n x^n + a_{n−1} x^{n−1} + · · · + a_0 = 0, where the coefficients a_i are integers. Then any rational solution x = p/q, where p, q are relatively prime, is such that p divides a_0 and q divides a_n.

A consequence of this is

Theorem 2.10 The eigenvalues of a graph Laplacian are either integers or irrationals.

Proof. Consider the equation associated to the characteristic polynomial of the graph Laplacian; it has the form a_n x^n + a_{n−1} x^{n−1} + · · · + a_1 x = 0, because the graph is connected so that there is only one 0 eigenvalue. Assume that a nonzero eigenvalue is of the form x = p/q with p, q relatively prime integers. Factoring out x and applying the lemma above to the remaining polynomial, p divides a_1 and q divides a_n. Since a_n = ±1, q = 1, so that x = p is an integer. □

The fact that some graphs have integer spectrum was discussed by Grone and Merris [14]. Many of their results are inequalities for λ_2 and λ_{n−1}. Our results complement their approach.
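The dichotomy is easy to observe numerically; the sketch below (an illustration added here, not from the paper) compares the clique K_4, whose nonzero eigenvalue is the integer 4, with the 4-vertex chain, whose nonzero eigenvalues 2 − √2, 2, 2 + √2 include irrationals.

import numpy as np

def laplacian(A):
    # graph Laplacian L = D - A of an adjacency matrix A
    return np.diag(A.sum(axis=1)) - A

K4 = np.ones((4, 4)) - np.eye(4)                # clique on 4 vertices
P4 = np.zeros((4, 4))
for i in range(3):                              # chain 1-2-3-4
    P4[i, i + 1] = P4[i + 1, i] = 1.0

print(np.round(np.linalg.eigvalsh(laplacian(K4)), 6))  # [0, 4, 4, 4]: integers
print(np.round(np.linalg.eigvalsh(laplacian(P4)), 6))  # [0, 0.585786, 2, 3.414214]: 2 +/- sqrt(2)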
3 Special graphs

3.1 Cliques and stars

The clique K_n has eigenvalue n with multiplicity n − 1 and eigenvalue 0. The eigenvectors for eigenvalue n can be chosen as v^k = e_1 − e_k, k = 2, . . . , n. To see this, note that L = n I_n − 1, where I_n is the identity matrix of order n and 1 is the (n, n) matrix where all elements are 1.

A star of n vertices S_n is a tree such that one vertex, say vertex 1, is connected to all the others. For a star S_n, the eigenvalues and eigenvectors are
λ = 1, multiplicity n − 2, eigenvectors e_2 − e_k, k = 3, . . . , n,
λ = n, multiplicity 1, eigenvector (n − 1) e_1 − Σ_{k=2}^n e_k,
λ = 0, multiplicity 1, eigenvector ˆ1.

3.2 Bipartite and multipartite graphs

Consider a bipartite graph K_{n1,n2}. The Laplacian is
L = \begin{pmatrix} n_2 I_{n_1} & -\mathbf{1} \\ -\mathbf{1}^T & n_1 I_{n_2} \end{pmatrix}, (3)
where the top left block has size n_1 × n_1, the bottom right block has size n_2 × n_2, and the off-diagonal blocks have all their entries equal to −1. The eigenvalues, with their multiplicities denoted as exponents, are 0^1, n_1^{n_2−1}, n_2^{n_1−1}, (n_1 + n_2)^1. Eigenvectors for n_1 can be chosen as e_{n_1+1} − e_i (i = n_1 + 2, . . . , n_1 + n_2). The eigenvector for n = n_1 + n_2 is (1/n_1, . . . , 1/n_1, −1/n_2, . . . , −1/n_2)^T.
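These closed-form spectra are easy to confirm numerically; the short check below (added for illustration, with arbitrary sizes and a small helper for counting multiplicities) compares np.linalg.eigvalsh with the formulas above.

import numpy as np
from collections import Counter

def laplacian(A):
    return np.diag(A.sum(axis=1)) - A

def spectrum(A):
    # eigenvalues rounded so that multiplicities can be counted
    return Counter(np.round(np.linalg.eigvalsh(laplacian(A)), 6))

n = 5
Kn = np.ones((n, n)) - np.eye(n)           # clique K_n
Sn = np.zeros((n, n))                      # star S_n, vertex 0 is the centre
Sn[0, 1:] = 1; Sn[1:, 0] = 1
n1, n2 = 3, 4
Kb = np.zeros((n1 + n2, n1 + n2))          # complete bipartite K_{n1,n2}
Kb[:n1, n1:] = 1; Kb[n1:, :n1] = 1

print(spectrum(Kn))   # expect {0: 1, 5: 4}
print(spectrum(Sn))   # expect {0: 1, 1: 3, 5: 1}
print(spectrum(Kb))   # expect {0: 1, 3: 3, 4: 2, 7: 1}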
Similarly, the spectrum of a multipartite graph K_{n_1,n_2,...,n_p} is 0^1, (n − n_1)^{n_1−1}, (n − n_2)^{n_2−1}, . . . , (n − n_p)^{n_p−1}, n^{p−1}, where n = n_1 + · · · + n_p. The eigenvectors associated to n − n_1 are composed of 1 and −1 on two vertices of part 1, padded with zeros for the rest.

3.3 Cycles

For a cycle, the Laplacian is a circulant matrix, therefore its spectrum is well known. The eigenvalues are
µ_k = 4 sin^2((k − 1)π/n), k = 1, . . . , n. (4)
They are associated to the complex eigenvectors v^k whose components are
v^k_j = exp(i(j − 1)(k − 1)2π/n), j = 1, . . . , n. (5)
The real eigenvectors w^k, x^k are
w^k = (0, sin(a_k), sin(2a_k), . . . , sin((n − 1)a_k))^T, (6)
x^k = (1, cos(a_k), cos(2a_k), . . . , cos((n − 1)a_k))^T, (7)
a_k = 2(k − 1)π/n. (8)
Ordering the eigenvalues, we have
λ_1 = µ_1 = 0, (9)
λ_2 = λ_3 = µ_2, (10)
λ_{2k} = λ_{2k+1} = µ_{k+1}, k ≥ 1. (11)-(12)
For n = 2p + 1, λ_{2p} = λ_{2p+1} = µ_{p+1}. For n = 2p, λ_{2p} = µ_{p+1} = 4 is an eigenvalue of multiplicity 1; an eigenvector is (1, −1, . . . , 1, −1)^T. In all other cases, the eigenvalues have multiplicity two, so that all vertices are soft nodes. Remark that the maximum number of 0s in an eigenvector is n/2. To see this, note that if two adjacent vertices had value 0 then their neighbors in the cycle would have 0 as well and we would only have 0s, but the null vector is not an eigenvector. This means that we have at most n/2 zeros. This bound is reached for n even.
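As a sanity check on (4) (added here as an illustration, with an arbitrary n), one can compare the eigenvalues of an explicit cycle Laplacian with the circulant formula:

import numpy as np

n = 7
L = 2 * np.eye(n)                     # Laplacian of the cycle on n vertices
for j in range(n):
    L[j, (j + 1) % n] = -1
    L[j, (j - 1) % n] = -1

mu = 4 * np.sin(np.arange(n) * np.pi / n) ** 2        # formula (4), k - 1 = 0, ..., n-1
print(np.allclose(np.sort(np.linalg.eigvalsh(L)), np.sort(mu)))   # True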
3.4 Chains

For chains C_n, there are only single eigenvalues; they are [15]
λ_k = 4 sin^2(π(k − 1)/(2n)), k = 1, . . . , n. (13)
The eigenvector v^k has components
v^k_j = cos((π(k − 1)/n)(j − 1/2)), j = 1, . . . , n. (14)
Obviously the cosine is zero if and only if
(k − 1)(2j − 1) = n(1 + 2m), (15)
where m is an integer. There is no solution for n = 2^α, for α a positive integer. Apart from this case, there is always at least one soft node. If n is a prime number, the middle vertex j = (n + 1)/2 is the only soft node. For k odd, all vertices j such that 2j − 1 divides n have a zero value, including the middle vertex.

For n odd, chains and cycles share (n − 1)/2 eigenvalues and eigenvectors. To see this, consider a chain with n = 2p + 1. All k = 2q + 1 give a chain eigenvalue λ_k = 4 sin^2(πq/(2p + 1)) that is also a cycle eigenvalue. The eigenvector components v^q_j are such that v^q_1 = v^q_{2p+1}.
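The following sketch (illustrative, not from the paper; n and the tolerance are arbitrary choices) checks (13) against a numerically computed chain spectrum and uses (14) directly to list the soft nodes of each eigenvector, which is equivalent to condition (15):

import numpy as np

n = 9
L = np.zeros((n, n))
for a in range(n - 1):                    # chain a - (a+1)
    L[a, a] += 1; L[a + 1, a + 1] += 1
    L[a, a + 1] = L[a + 1, a] = -1

lam = 4 * np.sin(np.pi * np.arange(n) / (2 * n)) ** 2      # formula (13)
print(np.allclose(np.sort(np.linalg.eigvalsh(L)), np.sort(lam)))  # True

j = np.arange(1, n + 1)
for k in range(2, n + 1):                 # k = 1 is the constant eigenvector
    v = np.cos(np.pi * (k - 1) / n * (j - 0.5))            # formula (14)
    soft = j[np.abs(v) < 1e-9].tolist()
    if soft:
        print(f"k = {k}: soft nodes {soft}")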
Using these transformations we can generate new graphs that have a soft node, starting from minimal graphs having soft nodes.

4 Transformations preserving eigenvalues

In this section, we present four main transformations of graphs such that one eigenvalue is preserved. These are the link between two vertices, the articulation, the soldering and the contraction/expansion. The first three transformations are in the literature in a general form; we choose to present them in their most elementary form. Furthermore, these transformations will all be unary: they act on a single graph. Binary transformations can be reduced to unary transformations for non-connected graphs.

4.1 Link between two equal vertices

An important theorem due to Merris [10] connects equal component vertices.

Theorem 4.1 (Link between two vertices) Let λ be an eigenvalue of L(G) for an eigenvector x. If x_i = x_j, then λ is an eigenvalue of L(G′) for x, where the graph G′ is obtained from G by deleting or adding the edge e = ij.

This transformation preserves the eigenvalue and eigenvector. It applies to multiple graphs. Fig. 1 shows examples of the transformation.

Figure 1: Example of the transform: link between two equal vertices.

We have the following corollary of the theorem.

Theorem 4.2 Let λ be an eigenvalue of two graphs G_1 and G_2 for respective eigenvectors x^1, x^2, with two vertices i, j such that x^1_i ≠ 0 or x^2_j ≠ 0. Then the graph G(V_1 ∪ V_2, E_1 ∪ E_2 ∪ ij) affords, for λ, the eigenvector
y = x^2_j (x^1; 0) + x^1_i (0; x^2).

This allows to generate many more graphs that have an eigenvalue λ.
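Wait, the transformation order above needs the eigenvector to have equal components; a concrete instance of Theorem 4.1 (an added illustration) is the 3-chain with eigenvector (1, −2, 1) for λ = 3: the end vertices carry equal components, so the edge closing the triangle can be added without changing the eigenpair.

import numpy as np

def laplacian(A):
    return np.diag(A.sum(axis=1)) - A

P3 = np.array([[0, 1, 0],
               [1, 0, 1],
               [0, 1, 0]], float)          # chain 1-2-3
K3 = P3.copy(); K3[0, 2] = K3[2, 0] = 1    # add the edge 1-3 (x_1 = x_3)

x = np.array([1.0, -2.0, 1.0])
print(np.allclose(laplacian(P3) @ x, 3 * x))   # True: eigenpair of the chain
print(np.allclose(laplacian(K3) @ x, 3 * x))   # True: still an eigenpair after adding the edge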
4.2 Articulation

An elementary transformation inspired by Merris's principle of reduction and extension [10] is to add a soft node to an existing soft node. This does not change the eigenvalue. We have the following result.

Theorem 4.3 Articulation (A): Assume a graph G(V, E) with n vertices where x is an eigenvector such that x_i = 0 for an eigenvalue λ. Then the extension x′ of x such that x′_{1:n} = x_{1:n} and x′_{n+1} = 0 is an eigenvector for λ of the Laplacian L(G′), where G′(V′, E′) is such that V′ = V ∪ {n + 1} and E′ = E ∪ {i(n + 1)}.

Figure 2: Example of the articulation property. The large dot corresponds to a soft node.

The general case presented by Merris [10] amounts to applying this elementary transformation several times. The transformation is valid for graphs with arbitrary weights, and the extended edges can have arbitrary weights. Fig. 2 illustrates this property on the two graphs labeled 5.6 and 5.23 in the classification given in [1]. An immediate consequence of this elementary transform is that any soft node can be extended into an arbitrarily large graph of soft nodes while preserving the eigenvalue and extending the eigenvector in a trivial way.
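For instance (illustration added here), hanging a new vertex on the soft node of the 3-chain, whose eigenvector (1, 0, −1) affords λ = 1, gives the 4-vertex star with eigenvector (1, 0, −1, 0) for the same λ = 1:

import numpy as np

def laplacian(A):
    return np.diag(A.sum(axis=1)) - A

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], float)          # chain 1-2-3; vertex 2 is a soft node for lambda = 1
x = np.array([1.0, 0.0, -1.0])
print(np.allclose(laplacian(A) @ x, x))   # True

Aext = np.zeros((4, 4)); Aext[:3, :3] = A
Aext[1, 3] = Aext[3, 1] = 1               # articulation: new vertex 4 attached to the soft node 2
xext = np.append(x, 0.0)
print(np.allclose(laplacian(Aext) @ xext, xext))   # True: lambda = 1 is preserved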
Fig. 3 shows two graphs that have the same eigenvalue λ = 1 and that are connected by the articulation transform.

Figure 3: Two graphs connected by the articulation transform.

4.3 Soldering

A consequence of the contraction principle of Merris [10] is that coalescing two soft nodes of a graph leaves the eigenvalue invariant. This is especially important because we can "solder" two graphs at a soft node.

Theorem 4.4 Soldering: Let x be an eigenvector affording λ for a graph G. Let i and j be two soft nodes without common neighbors. Let G′ be the graph obtained from G by contracting i and j, and let x′ be the vector obtained from x by deleting its jth component. Then x′ is an eigenvector of L(G′) for λ.

Figure 4: Examples of the soldering transform.

This transformation is valid for graphs with arbitrary weights.
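A minimal check of Theorem 4.4 (an added illustration, with an arbitrary choice of graph): take two disjoint 3-chains, each with eigenvector (1, 0, −1) for λ = 1, and contract their two middle vertices, which are soft and have no common neighbors; the result is the 5-vertex star, and the glued eigenvector still affords λ = 1.

import numpy as np

def laplacian(A):
    return np.diag(A.sum(axis=1)) - A

# two disjoint chains 1-2-3 and 4-5-6; vertices 2 and 5 are soft for lambda = 1
A = np.zeros((6, 6))
for i, j in [(0, 1), (1, 2), (3, 4), (4, 5)]:
    A[i, j] = A[j, i] = 1
x = np.array([1.0, 0.0, -1.0, 1.0, 0.0, -1.0])
print(np.allclose(laplacian(A) @ x, x))             # True

# contract vertices 2 and 5 into one vertex (last index): this is the star S_5
As = np.zeros((5, 5))
for leaf in range(4):
    As[leaf, 4] = As[4, leaf] = 1
xs = np.array([1.0, -1.0, 1.0, -1.0, 0.0])          # x with the duplicate soft component removed
print(np.allclose(laplacian(As) @ xs, xs))          # True: lambda = 1 survives the soldering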
4.4 Regular expansion of a graph

We have the following theorem.

Theorem 4.5 Let x be an eigenvector of a graph G for λ and let i be a vertex connected only to p soft nodes. Let G′ be the graph obtained from G by replacing i by a d-regular graph whose k vertices are all connected to the p soft nodes. Then λ = p, and an eigenvector x′ of G′ is formed by assigning to the new vertices the value x′_j = x_i/k.

Proof. Without loss of generality, we can assume that i = n and that the p soft nodes are n − p + 1, . . . , n − 1. The eigenvalue relation L(G)x = λx then acts on the vector x = (x_1, . . . , x_{n−p}, 0, . . . , 0, x_n)^T, and the last row of L(G) is (0, . . . , 0, −1, . . . , −1, p), the p entries −1 sitting in the columns of the soft nodes. The nth line reads p x_n = λ x_n, so that λ = p. The (n − 1)th line reads α + (−1)x_n = p x_{n−1} = 0, where α is the sum of the other terms.

Let us detail the eigenvector relation for the Laplacian of G′. Consider any new vertex j linked to the p soft nodes and to d new vertices. The corresponding line of the eigenvector relation for the Laplacian of G′ reads
(d + p) x′_j + Σ_{i∼j, i≥n} (−1) x′_i = λ′ x′_j.
This implies
(d + p − λ′) x′_j = Σ_{i∼j, i≥n} x′_i.
An obvious solution is λ′ = λ = p, x′_i = x′_n for all i ≥ n + 1. The value x′_n is obtained by examining line n − 1: we have α + Σ_{i=n}^{n+k−1} (−1) x′_i = 0, so that x′_n = x_n/k. In fact, we can get all solutions by satisfying the two conditions
d x′_j = Σ_{i∼j, i≥n} x′_i for all j ≥ n, and x_n = Σ_{i≥n} x′_i. (16)
□
Fig. 5 shows examples of expansion from a single soft node for different values of d. Here the eigenvalue is 1. Fig. 6 shows examples of expansion from two soft nodes; the eigenvalue is 2. For d = 2, the values at the vertices of the bold edges are such that their sum is equal to 1. For d = 2, the values at the triangle are all equal to t, and the same holds for the square with a value s; these values verify 3t + 4s = 1.

Figure 5: Examples of expansion from a single soft node.

Figure 6: Examples of expansion from two soft nodes. For d = 2, the values at the triangle are all equal to t, the same holds for the square with a value s. These values verify 3t + 4s = 1.
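A small numerical instance of Theorem 4.5 (added as an illustration): in the 3-chain with eigenvector (1, 0, −1) for λ = 1, the end vertex carrying −1 is attached only to the soft node (p = 1); replacing it by a 1-regular graph on k = 2 vertices (a single edge), both attached to the soft node, keeps λ = 1 with the value −1 split as −1/2 on each new vertex.

import numpy as np

def laplacian(A):
    return np.diag(A.sum(axis=1)) - A

# replace vertex 3 of the chain 1-2-3 by two vertices a, b joined by an edge (d = 1),
# both tied to the soft node 2
Aexp = np.zeros((4, 4))
for u, v in [(0, 1), (1, 2), (1, 3), (2, 3)]:     # edges 1-2, 2-a, 2-b, a-b
    Aexp[u, v] = Aexp[v, u] = 1
xexp = np.array([1.0, 0.0, -0.5, -0.5])           # x'_j = x_i / k with k = 2
print(np.allclose(laplacian(Aexp) @ xexp, xexp))  # True: lambda = p = 1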
4.5 Replace coupling by square

We have the following transformation that leaves the eigenvalue unchanged [12].

Theorem 4.6 (Replace an edge by a soft square) Let x be an eigenvector of the Laplacian of a graph G for an eigenvalue λ. Let G′ be the graph obtained from G by deleting an edge ij such that x_i = −x_j, adding two soft vertices k, l ∈ V(G′) for the extension x′ of x (i.e. x′_m = x_m for m ∈ V(G) and x′_k = x′_l = 0), and adding the four edges ik, kj, il, lj. Then x′ is an eigenvector of the Laplacian of G′ for the eigenvalue λ.

This result was proved in [12] for a graph with weights 1. Here we generalize it to a graph with arbitrary weights.

Proof. The eigenvalue relation at vertex i reads
(d_i − λ) x_i = Σ_{m∼i, m≠j} w_{i,m} x_m + w_{i,j} x_j.
Since x_i = −x_j, this implies
(d_i + w_{i,j} − λ) x_i = Σ_{m∼i, m≠j} w_{i,m} x_m.
Introducing the two new vertices k, l such that x′_k = x′_l = 0, connected to i and to j by edges of weights w_{i,k} = w_{j,k} = α w_{i,j} and w_{i,l} = w_{j,l} = (1 − α) w_{i,j}, the relation above leads to
(d_i + w_{i,k} + w_{i,l} − λ) x′_i = Σ_{m∼i, m≠j} w_{i,m} x′_m + w_{i,k} x′_k + w_{i,l} x′_l,
which shows that x′ satisfies the eigenvector relation at i. The relation at j follows in the same way, and at k and l both sides vanish because x′_i = −x′_j. □

See Fig. 7 for an illustration of the theorem.

Figure 7: Replacement of coupling by a square; in both cases the eigenvalue is λ = 2.
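The simplest instance (added here for illustration) is the one of Fig. 7: a single edge, with eigenvector (1, −1) for λ = 2, becomes the 4-cycle with eigenvector (1, 0, −1, 0) for the same λ = 2.

import numpy as np

def laplacian(A):
    return np.diag(A.sum(axis=1)) - A

K2 = np.array([[0, 1],
               [1, 0]], float)
x = np.array([1.0, -1.0])
print(np.allclose(laplacian(K2) @ x, 2 * x))       # True

# replace the edge i-j by the square i-k-j-l, vertices ordered (i, k, j, l)
C4 = np.zeros((4, 4))
for a, b in [(0, 1), (1, 2), (2, 3), (3, 0)]:
    C4[a, b] = C4[b, a] = 1
xs = np.array([1.0, 0.0, -1.0, 0.0])
print(np.allclose(laplacian(C4) @ xs, 2 * xs))     # True: lambda = 2 is unchanged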
5.1 Inserting soft nodes

Theorem 5.1 Let x be an eigenvector of a graph G with weights 1 for λ. Assume we can pair the non-zero components of x as {i, j} where x_i = −x_j ≠ 0. Let G′ be the graph obtained from G by including k soft nodes between each pair {i, j}. The vector x′ so obtained is an eigenvector of the Laplacian of G′ for the eigenvalue λ + k.

Proof. Let i, j ∈ V(G) be a pair such that x_i = −x_j. The eigenvector equation reads
d_i x_i − Σ_{m∼i} x_m = λ x_i.
Introducing k new vertices with x′_p = 0, p = 1, ..., k, we can write the relation as
(d_i + k) x′_i − Σ_{m∼i} x′_m = (λ + k) x′_i.
This shows that x′ is an eigenvector for the new graph. □

Figure 8: Example of the action of inserting a soft node.

Fig. 8 shows an example of the action of inserting a soft node.

When the graph is weighted, the result is still valid. Consider that we add only one soft vertex connected to i by a weight w_{i,k}.
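The following small check (our sketch, not from the original text) illustrates Theorem 5.1 with k = 1: a single edge with eigenpair (2, (1, −1)) becomes a triangle once one soft node is inserted between the pair, and the eigenvalue shifts to 3.

import numpy as np

# Edge {0,1}: eigenvalue 2 with eigenvector x = (1, -1).
L = np.array([[1., -1.], [-1., 1.]])
x = np.array([1., -1.])
assert np.allclose(L @ x, 2 * x)

# Insert k = 1 soft node between the pair {0,1}: vertex 2 is connected to
# both 0 and 1, giving a triangle; x is extended by a zero component.
L1 = np.array([[2., -1., -1.],
               [-1., 2., -1.],
               [-1., -1., 2.]])
x1 = np.array([1., -1., 0.])
print(np.allclose(L1 @ x1, 3 * x1))   # True: eigenvalue shifted from 2 to 2 + 1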
The eigenvalue of the new graph is λ + w_{i,k}. This can transform a graph with an integer eigenvalue into a graph with an irrational eigenvalue.

5.2 Addition of a soft node

Connecting a soft node to all the vertices of a graph augments all the non-zero eigenvalues by 1. This result was found by Das [11]. We recover it here and present it for completeness.

Theorem 5.2 (Addition of a soft node) Let G(V, E) be a graph affording an eigenvalue λ ≠ 0 for an eigenvector x. Then the new graph G′ obtained by adding a node connected to all the nodes of G has eigenvalue λ + 1 for the eigenvector x′ obtained by extending x by a zero component.

See Fig. 9 for examples.

Proof. Assume λ is an eigenvalue with eigenvector v for the Laplacian L(G) of a graph G with n vertices. Now add an extra vertex n + 1 connected to all vertices of G and form L(G ∪ {n + 1}). We have the following identity
$$\begin{pmatrix} L(G) + I_n & -\hat{1}_n \\ -\hat{1}_n^T & n \end{pmatrix}\begin{pmatrix} v \\ 0 \end{pmatrix} = (\lambda + 1)\begin{pmatrix} v \\ 0 \end{pmatrix},$$
where the last row holds because \hat{1}_n^T v = 0 for λ ≠ 0; this proves the statement. □

Figure 9: Examples of the addition of a soft node.
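A sketch checking Theorem 5.2 (ours, with a small helper laplacian defined in the snippet): the chain Ch3 has eigenvalue 1 for x = (1, 0, −1); connecting a new soft node to all three vertices shifts the eigenvalue to 2.

import numpy as np

def laplacian(n, edges):
    # Combinatorial Laplacian of a graph on n vertices with unit weights.
    L = np.zeros((n, n))
    for i, j in edges:
        L[i, i] += 1; L[j, j] += 1
        L[i, j] -= 1; L[j, i] -= 1
    return L

# Chain Ch3: eigenvalue 1 with eigenvector (1, 0, -1).
L3 = laplacian(3, [(0, 1), (1, 2)])
x = np.array([1., 0., -1.])
assert np.allclose(L3 @ x, 1 * x)

# Add a soft node (vertex 3) connected to all vertices of the chain.
L4 = laplacian(4, [(0, 1), (1, 2), (0, 3), (1, 3), (2, 3)])
x4 = np.append(x, 0.)
print(np.allclose(L4 @ x4, 2 * x4))   # True: eigenvalue shifted from 1 to 2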
Important examples are the ones formed with the special graphs considered above. There, adding a vertex to an (n − 1)-vertex graph, one knows explicitly n − 1 eigenvectors and eigenvalues.

Theorem 3.2 by Das [11] can be seen as a direct consequence of adding a soft node and an articulation to a graph.

5.3 Inserting a matching

First we define perfect and alternate perfect matchings.

Definition 5.3 (Perfect matching) A perfect matching of a graph G is a matching (i.e., an independent edge set) in which every vertex of the graph is incident to exactly one edge of the matching.

Definition 5.4 (Alternate perfect matching) An alternate perfect matching for a vector v on the nodes of a graph G is a perfect matching for the non-zero nodes such that the edges e_{ij} of the matching satisfy v_i = −v_j (≠ 0).

We have the following result [12], inspired by the alternating principle of Merris [10].

Theorem 5.5 (Add/Delete an alternate perfect matching) Let v be an eigenvector of L(G) affording an eigenvalue λ. Let G′ be the graph obtained from G by adding (resp. deleting) an alternate perfect matching for v.
Then v is an eigenvector of L(G′) affording the eigenvalue λ + 2 (resp. λ − 2).

This is a second operator which shifts eigenvalues by ±2. Examples are given in Fig. 10.

Figure 10: Examples of inserting a matching.

5.4 Cartesian product

The Cartesian product G□H of two graphs G = (V, E) and H = (W, F) has vertex set V × W = {(v, w), v ∈ V, w ∈ W}. Two vertices (v1, w1) and (v2, w2) are adjacent in G□H if v1 = v2 and w1w2 ∈ F, or w1 = w2 and v1v2 ∈ E. We have the following result, see Merris [10].

Theorem 5.6 If x is an eigenvector of G affording µ and y is an eigenvector of H affording ν, then the Kronecker product of x and y, x ⊗ y, is an eigenvector of G□H for the eigenvalue µ + ν.

Fig. 11 illustrates the theorem.

Figure 11: Cartesian product of two chains Ch3 (left) and of a cycle Cy4 and a chain Ch3 (right).

Important examples are the ones formed with the special graphs considered above. There, one knows explicitly the eigenvectors and eigenvalues.
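A numerical sketch of Theorem 5.6 (ours, with a small helper laplacian defined in the snippet), using the standard identity L(G□H) = L(G) ⊗ I + I ⊗ L(H): the chain Ch3 with eigenpair (1, (1, 0, −1)) and the cycle Cy4 with eigenpair (2, (1, 0, −1, 0)) give the product eigenvalue 1 + 2 = 3.

import numpy as np

def laplacian(n, edges):
    # Combinatorial Laplacian of a graph on n vertices with unit weights.
    L = np.zeros((n, n))
    for i, j in edges:
        L[i, i] += 1; L[j, j] += 1
        L[i, j] -= 1; L[j, i] -= 1
    return L

LG = laplacian(3, [(0, 1), (1, 2)])                  # chain Ch3
LH = laplacian(4, [(0, 1), (1, 2), (2, 3), (3, 0)])  # cycle Cy4
x, mu = np.array([1., 0., -1.]), 1.0
y, nu = np.array([1., 0., -1., 0.]), 2.0
assert np.allclose(LG @ x, mu * x) and np.allclose(LH @ y, nu * y)

# Laplacian of the Cartesian product and the Kronecker-product eigenvector.
Lprod = np.kron(LG, np.eye(4)) + np.kron(np.eye(3), LH)
z = np.kron(x, y)
print(np.allclose(Lprod @ z, (mu + nu) * z))   # True: eigenvalue mu + nu = 3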
For example, the Cartesian product Cn × Cm of two chains Cn and Cm with n and m nodes respectively has eigenvalues λ_{i,j} = λ_i + λ_j, where λ_i (resp. λ_j) is an eigenvalue of Cn (resp. Cm). The eigenvectors have components
$$v_{i,j}(p, q) = \cos\left[\frac{\pi(i-1)}{n}\left(p - \frac{1}{2}\right)\right]\,\cos\left[\frac{\pi(j-1)}{m}\left(q - \frac{1}{2}\right)\right],$$
where i, p ∈ {1, ..., n} and j, q ∈ {1, ..., m}.

5.5 Graph complement

We recall the definition of the complement of a graph G.

Definition 5.7 (Complement of a graph) Given a graph G(V, E) with n vertices, its complement Gc is the graph Gc(V, Ec), where Ec is the complement of E in the set of edges of the complete graph Kn.

We have the following property, see for example [1].

Theorem 5.8 If x is an eigenvector of a graph G with n vertices affording λ ≠ 0, then x is an eigenvector of Gc affording n − λ.

An example is shown in Fig. 12. The eigenvalues and eigenvectors are given in Table 1.
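A quick check of Theorem 5.8 (our sketch, with a small helper laplacian defined in the snippet): the chain Ch3 has eigenvalue 1 for x = (1, 0, −1); its complement in K3 is the single edge {0, 2}, and the same vector is an eigenvector of the complement for 3 − 1 = 2.

import numpy as np

def laplacian(n, edges):
    # Combinatorial Laplacian of a graph on n vertices with unit weights.
    L = np.zeros((n, n))
    for i, j in edges:
        L[i, i] += 1; L[j, j] += 1
        L[i, j] -= 1; L[j, i] -= 1
    return L

n = 3
edges = [(0, 1), (1, 2)]                                   # chain Ch3
comp = [(i, j) for i in range(n) for j in range(i + 1, n)
        if (i, j) not in edges]                            # complement in K3
L, Lc = laplacian(n, edges), laplacian(n, comp)

x, lam = np.array([1., 0., -1.]), 1.0
assert np.allclose(L @ x, lam * x)
print(np.allclose(Lc @ x, (n - lam) * x))   # True: eigenvalue n - lam = 2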
Figure 12: Graph 6.35 (left) in the classification [1] and its complement 6.101 (right).

Table 1: Eigenvalues (top lines) and eigenvectors for the two complementary graphs 6.35 and 6.101 shown in Fig. 12. The eigenvalues of 6.35 are 5.2361, 5, 4, 3, 0.7639 and 0; those of 6.101 are 0.7639, 1, 2, 3, 5.2361 and 0, in agreement with Theorem 5.8.

Many times Gc is not connected. An example where Gc is connected is the cycle Cy6.

6 λ-soft graphs

6.1 Definitions and properties

We introduce the notions of λ, λ soft and λ soft minimal graphs. The transformations of the previous section will enable us to prove the relation between these two types of graphs.

Definition 6.1 A graph G affording an eigenvector X for an eigenvalue λ is λ.

Definition 6.2 A λ graph G affording an eigenvector X for the eigenvalue λ is λ soft if one of the entries of X is zero.

Definition 6.3 A graph G affording an eigenvector X for an eigenvalue λ is λ minimal if it is λ and minimal in the sense of inclusion.

Clearly, for a given λ, there is at least one λ minimal graph. As an example, the 1 soft minimal graph, the chain Ch3, is shown below.
6.2 λ subgraph

In the following section, we study the properties of a λ subgraph G included in a λ graph G''(V'', E''). Consider two graphs G(V, E) with n vertices and G′(V'' − V, E′) such that E only connects elements of V and E′ only connects elements of V′. Assume the two graphs G (with n vertices) and G′ (with n′ vertices) are included in a large graph G'' and that p vertices of G are linked to p′ vertices of G′. We label the p vertices of G as n − p + 1, ..., n and the p′ vertices of G′ as 1, ..., p′. We have
LX = λX, (17)
L''X'' = λX'', (18)
where L'' is the graph Laplacian of the large graph G''; L'' can be written as
$$L'' = \begin{pmatrix} L & 0 \\ 0 & L' \end{pmatrix} + \begin{pmatrix} 0 & 0 & 0 & 0 \\ 0 & a & -b & 0 \\ 0 & -b^T & c & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}.$$
A first result is

Theorem 6.4 The square matrix
$$\delta = \begin{pmatrix} a & -b \\ -b^T & c \end{pmatrix}$$
is a graph Laplacian.

Proof. The submatrices a, b, c have respective sizes a(p, p), b(p, p′), c(p′, p′); a and c are diagonal and verify
a_{ii} = Σ_{j=1}^{p′} b_{ij},   c_{ii} = Σ_{j=1}^{p} b_{ji}. (19)
In other words,
a 1̂_p = b 1̂_{p′},   c 1̂_{p′} = b^T 1̂_p,
where 1̂_p is the p column vector of ones.
□

At this point, we did not assume any relation between the eigenvectors X for G and X'' for G''. We have the following

Theorem 6.5 The eigenvalue relations (17), (18) imply either X = X''(1 : n) or X(1 : n − p) = 0.

Proof. For p = 1 and λ a single eigenvalue, rank(L − λI) = n − 1, so either X = X''(1 : n) or X(1 : n − 1) = 0. We admit the result for p > 1. □

We can then assume that the eigenvectors of L'' have the form
$$L'' \begin{pmatrix} X \\ X' \end{pmatrix} = \lambda \begin{pmatrix} X \\ X' \end{pmatrix},$$
where LX = λX. Substituting L'', we get
$$\lambda \begin{pmatrix} X \\ X' \end{pmatrix} = \begin{pmatrix} L & 0 \\ 0 & L' \end{pmatrix}\begin{pmatrix} X \\ X' \end{pmatrix} + \begin{pmatrix} 0 & 0 & 0 & 0 \\ 0 & a & -b & 0 \\ 0 & -b^T & c & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}\begin{pmatrix} X \\ X' \end{pmatrix}.$$
Using the relation (17) we obtain
$$\begin{pmatrix} 0 & 0 \\ 0 & a \end{pmatrix} X + \begin{pmatrix} 0 & 0 \\ -b & 0 \end{pmatrix} X' = 0, \quad (20)$$
$$L'X' - \begin{pmatrix} 0 & b^T \\ 0 & 0 \end{pmatrix} X + \begin{pmatrix} c & 0 \\ 0 & 0 \end{pmatrix} X' = \lambda X'. \quad (21)$$
There are p non-trivial equations in the first matrix equation and p′ in the second one.
Using an array notation (as in Fortran), the system above can be written as
a X(n − p + 1 : n) − b X′(1 : p′) = 0, (22)
−b^T X(n − p + 1 : n) + c X′(1 : p′) + (L′X′)(1 : p′) = λ X′(1 : p′), (23)
(L′X′)(p′ + 1 : n′) = λ X′(p′ + 1 : n′). (24)
Extracting X from the first equation, we obtain
X(n − p + 1 : n) = a^{−1} b X′(1 : p′), (25)
and substituting in the second equation yields the closed system in X′
(−b^T a^{−1} b + c) X′(1 : p′) + (L′X′)(1 : p′) = λ X′(1 : p′), (26)
(L′X′)(p′ + 1 : n′) = λ X′(p′ + 1 : n′), (27)
where we used the fact that the matrix a of the degrees of the connections is invertible by construction.

Theorem 6.6 The matrix ∆ ≡ −b^T a^{−1} b + c is a generalized graph Laplacian: it is the Laplacian of a weighted graph. Its entries are rationals and not necessarily integers.

Proof. Note first that ∆ is obviously symmetric. We have
∆ 1̂_{p′} = −b^T a^{−1} b 1̂_{p′} + c 1̂_{p′} = −b^T a^{−1} a 1̂_p + b^T 1̂_p = 0.
This shows that each row of ∆ sums to zero, so that ∆ is a graph Laplacian. □
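The structure of ∆ can be checked numerically; the sketch below (ours, with hypothetical variable names) draws a random non-negative connection matrix b, builds the diagonal matrices a and c from its row and column sums as in (19), and verifies that ∆ = −b^T a^{−1} b + c is symmetric with zero row sums and non-positive off-diagonal entries.

import numpy as np

rng = np.random.default_rng(0)
p, pprime = 3, 4
b = rng.integers(1, 4, size=(p, pprime)).astype(float)  # positive connection weights
a = np.diag(b.sum(axis=1))                               # a_ii = sum_j b_ij
c = np.diag(b.sum(axis=0))                               # c_ii = sum_j b_ji
Delta = -b.T @ np.linalg.inv(a) @ b + c

print(np.allclose(Delta, Delta.T))                       # True: symmetric
print(np.allclose(Delta.sum(axis=1), 0.0))               # True: zero row sums
off = Delta - np.diag(np.diag(Delta))
print(np.all(off <= 1e-12))                              # True: off-diagonal <= 0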
From Theorem 2.10, the eigenvalues of ∆ are integers or irrationals and correspond to eigenvectors with integer or irrational components. We then write equations (26), (27) as
(∆̄ + L′) X′ = λ X′, (28)
where
$$\bar{\Delta} = \begin{pmatrix} \Delta & 0 \\ 0 & 0 \end{pmatrix}.$$
This is an eigenvalue relation for the graph Laplacian (∆̄ + L′). Four cases occur.

(i) λ = 0; then X′ is a vector of equal components and X also.

(ii) λ ≠ 0 is an eigenvalue of L′. Then one has the following

Theorem 6.7 Assume a graph G'' is λ for an eigenvector X'' = (X, X′)^T and contains a λ graph G for the eigenvector X. Consider the graph G′ with vertices V(G'') − V(G) and the corresponding edges in G''. If G′ is λ, then G'' is obtained from G using the articulation or link transformations.

Proof. Since λ ≠ 0 is an eigenvalue of L′, we can choose X′ an eigenvector for λ so that L′X′ = λX′; then ∆̄X′ = 0. A first possibility is X′ = 0; this corresponds to an articulation between G and G′. If X′ ≠ 0, L′X′ = λX′ implies that X′ is not a vector of equal components, so that X′ ∉ Null(∆). The only possibility for ∆X′ = 0 is ∆ = 0, so that c = b^T a^{−1} b. The term (b^T a^{−1} b)_{ij} is
$$(b^T a^{-1} b)_{ij} = \sum_{k=1}^{p} \frac{b_{ki} b_{kj}}{a_{kk}}.$$
Since the matrix c is diagonal, we have
$$\sum_{k=1}^{p} \frac{b_{ki} b_{kj}}{a_{kk}} = 0, \qquad \forall i \neq j.$$
Then b_{ki} b_{kj} = 0, so that a vertex k from G is only connected to one other vertex i or j from G′.
Then p = p′. This implies a_{ii} = c_{ii} = 1 for all i ∈ {1, ..., p}. The graphs G and G′ are then connected by a number of edges between vertices of the same value. □

(iii) λ ≠ 0 is not an eigenvalue of L′, and L′ and ∆̄ share a common eigenvector X′ for eigenvalues λ′ and λ − λ′ > 0. For λ − λ′ = 1, a possibility is to connect a soft node of G to G′. For λ − λ′ = p integer, a possibility is to connect p soft nodes of G to G′. We conjecture that there are no other possibilities.

(iv) λ ≠ 0 is not an eigenvalue of L′, and L′ and ∆̄ have different eigenvectors. Then there is no solution to the eigenvalue problem (28). To see this, assume the eigenvalues and eigenvectors of L′ and ∆̄ are respectively ν_i, V^i and µ_i, W^i, so that
L′V^i = ν_i V^i,   ∆̄W^i = µ_i W^i,   i = 1, 2, ..., n.
The eigenvectors can be chosen orthonormal and we have QV = WQ, where Q = (q^j_k) is an orthogonal matrix and V, W are the matrices whose columns are respectively the V^i and the W^i. We write
$$W^j = \sum_k q^j_k V^k.$$
Assuming X′ exists, we can expand it as
$$X' = \sum_i \alpha_i V^i.$$
Plugging this expansion into the relation (∆̄ + L′)X′ = λX′ yields
$$\sum_i \left( \alpha_i \nu_i V^i + \alpha_i \sum_j q^i_j \mu_j \sum_k q^j_k V^k \right) = \sum_i \lambda \alpha_i \nu_i V^i.$$
Projecting on a vector V^m we get
$$\alpha_m \nu_m + \alpha_m \sum_j q^m_j \mu_j q^j_m = \lambda \alpha_m \nu_m.$$
A first solution is α_m = 0 for all m, so that X′ = 0, an articulation. If α_m ≠ 0 then we get the set of linear equations linking the ν_i to the µ_i:
$$\sum_j q^m_j \mu_j q^j_m = (\lambda - 1)\nu_m, \qquad m = 1, \ldots, n.$$
Since Q is a general orthogonal matrix, the terms q^m_j are irrational in general. Therefore we conjecture that there are no solutions.

6.3 Examples of λ subgraphs

Using simple examples, we illustrate the different scenarios considered above. We first consider Theorem 6.7, see Fig. 13.

Figure 13: Two configurations where a graph G is included in a larger graph G'' for the eigenvalue 1.

Consider the configuration on the left of Fig. 13. We have
$$L = \begin{pmatrix} 1 & 0 & -1 \\ 0 & 1 & -1 \\ -1 & -1 & 2 \end{pmatrix}, \qquad L' = \begin{pmatrix} 1 & -1 & 0 & 0 \\ -1 & 3 & -1 & -1 \\ 0 & -1 & 1 & 0 \\ 0 & -1 & 0 & 1 \end{pmatrix}. \qquad (29)$$
Note that L and L′ have 1 as an eigenvalue. Here p = 1, p′ = 3 and a = 3, b = (1, 1, 1)^T, c = I_3, so that
$$\Delta = \begin{pmatrix} 2/3 & -1/3 & -1/3 \\ -1/3 & 2/3 & -1/3 \\ -1/3 & -1/3 & 2/3 \end{pmatrix}.$$
The matrices ∆̄ and L′ have different eigenvectors for the same eigenvalue 1. Choosing X′ an eigenvector of L′ for the eigenvalue 1 yields ∆̄X′ = 0. The only solution is X′ = 0; this is an articulation.

For the configuration on the right of Fig. 13 we have p = p′ = 3, a = c = I_3 and b = I_3, so that ∆ = 0. We have
LX = 1 · X, (30)
L''(X, X′)^T = 1 · (X, X′)^T, (31)
where X = (X_1, X_2, X_3)^T. In this configuration, X′ is an eigenvector of L′ for the eigenvalue 1, and we have Link connections between G and G′.

Finally, we show an example of case (iii) where G, G'' are 2 soft and G′ is 1 soft.

Figure 14: An example of case (iii) for eigenvalue λ = 2.

We have to solve (∆̄ + L′)X′ = 2X′ where
$$L = \begin{pmatrix} 2 & -1 & 0 & -1 \\ -1 & 2 & -1 & 0 \\ 0 & -1 & 2 & -1 \\ -1 & 0 & -1 & 2 \end{pmatrix}, \quad L' = \begin{pmatrix} 1 & 0 & -1 & 0 \\ 0 & 1 & -1 & 0 \\ -1 & -1 & 3 & -1 \\ 0 & 0 & -1 & 1 \end{pmatrix}, \quad \bar{\Delta} = \begin{pmatrix} 1/2 & -1/2 & 0 & 0 \\ -1/2 & 1/2 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}.$$
Note that the eigenvector X′ = (1, −1, 0, 0)^T is shared by L′ and ∆̄, so that (∆̄ + L′)X′ = 2X′.
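The example of Fig. 14 can be verified directly; the sketch below (ours, not part of the original text) checks that X′ = (1, −1, 0, 0)^T is an eigenvector of L′ and of ∆̄ for the eigenvalue 1, hence of their sum for the eigenvalue 2.

import numpy as np

Lp = np.array([[1., 0., -1., 0.],
               [0., 1., -1., 0.],
               [-1., -1., 3., -1.],
               [0., 0., -1., 1.]])
Dbar = np.zeros((4, 4))
Dbar[:2, :2] = [[0.5, -0.5], [-0.5, 0.5]]
Xp = np.array([1., -1., 0., 0.])

print(np.allclose(Lp @ Xp, Xp))                 # True: L' X' = 1 * X'
print(np.allclose(Dbar @ Xp, Xp))               # True: Delta-bar X' = 1 * X'
print(np.allclose((Dbar + Lp) @ Xp, 2 * Xp))    # True: eigenvalue 2 for the sum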
The transformations introduced in the two previous sections enable us to link the different members of a given class. To summarize, we have:

Articulation: one can connect any graph G2 to the soft nodes of a given graph G1 and keep the eigenvalue. The new graph G1 ∪ G2 has soft nodes everywhere in G2.

Link: introducing a link between equal nodes does not change the eigenvalue and the eigenvector.

Contraction of a d-regular graph linked to a soft node: to have minimal graphs in the sense of Link we need to take d = 0.

Soldering: one can connect two graphs by contracting one or several soft nodes of each graph.

In the next subsections we present a classification of small-size λ soft graphs for different λs.

6.4 1-soft graphs
Figure 15: 1s graphs: graphs generated by expansion.

Fig. 15 shows some of the 1s graphs generated by expansion. Note the variety of possibilities.

Figure 16: 1s graphs: graphs generated by articulation.

Fig. 16 shows some of the 1s graphs generated by articulation. The 1, 0, −1 configuration remains clearly visible.
Figure 17: 1-soft graphs. The soft nodes are in boldface.
We only present symmetric expansions so that links are possible.

Fig. 17 shows the 1s graphs with at most 6 vertices. Notice how they are linked by articulation (A), expansion/contraction (C) and links, and how they can all be obtained from the graph 5.3 (chain 3). The connection Ch3 - 28 is a contraction of two Ch3 chains. Connecting two 3-chains Ch3 with a Link transformation, we obtain a chain 6, Ch6. One can also go from Ch6 to 23 by soldering the two soft nodes.
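The articulation rule quoted above can be checked in the same way: attaching an arbitrary graph to a soft node and extending the eigenvector by zeros keeps the eigenvalue. The example below is ours (it glues a triangle onto the soft node of Ch3 through a single edge) and is only meant to illustrate the rule.

import numpy as np

# Ch3 on vertices {0,1,2} (soft node 1) and a triangle on {3,4,5},
# attached to the soft node through the single edge (1,3).
edges = [(0, 1), (1, 2), (3, 4), (4, 5), (3, 5), (1, 3)]
A = np.zeros((6, 6))
for i, j in edges:
    A[i, j] = A[j, i] = 1.0
L = np.diag(A.sum(axis=1)) - A

# Extend the Ch3 eigenvector (1, 0, -1) by zeros on the attached triangle.
X = np.array([1.0, 0.0, -1.0, 0.0, 0.0, 0.0])
assert np.allclose(L @ X, 1.0 * X)  # eigenvalue 1 is kept; every vertex of the triangle is soft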
6.5 2-soft graphs

Fig. 18 shows some of the 2s graphs generated by expansion of the 5.7 graph.

Figure 18: 2s graphs: graphs generated by expansion.

Similarly, Fig. 19 shows some of the 2s graphs generated by articulation from the same graph.

Figure 19: 2s graphs: graphs generated by articulation.

Fig. 20 shows all 2s graphs with at most 6 vertices. We included graph 5.1 because with a link it gives configuration 6.104. Notice how all graphs can be generated from 5.5 and 5.1.

Figure 20: 2-soft graphs.
6.6 3-soft graphs

Fig. 21 shows a 3s graph generated by expansion of graph 5.22.

Figure 21: 3s graphs: graphs generated by expansion.

Fig. 22 shows some 3s graphs generated by articulation on graphs 5.2 and 5.22.

Figure 22: 3s graphs: graphs generated by articulation.

Figure 23: 3-soft graphs.

Fig. 23 shows all 3s graphs with at most 6 vertices. Notice how they are generated by graphs 5.2, 5.22 and 5.3.
Graph 5.20 is the soldering of two graphs 5.2.

6.7 4-soft graphs

Fig. 24 shows some 4s graphs generated by articulation on the graph 5.3.

Figure 24: 4s graphs: graphs generated by articulation.

Fig. 25 shows the 4s graphs with at most 6 vertices. Notice how they are generated from graphs 5.5 (2 configurations) and 6.93. The graph 5.7 is included to show its connection to 6.93 (replacing a matching by a square).

Figure 25: 4-soft graphs.

6.8 5-soft graphs

Fig. 26 shows 5s graphs with at most 6 vertices. Notice how they stem from graphs 6.70, 5.13 and two configurations of 5.15.

Figure 26: 5-soft graphs.

6.9 6-soft graphs

Fig. 27 shows 6s graphs with at most 6 vertices. Notice how these graphs stem from graphs 6.9, 6.37, 6.2 (two configurations) and 6.16.
Figure 27: 6-soft graphs.

6.10 x-soft graphs, x non integer

As proven above, the only eigenvalues that are non integer are irrational. For these, there can be soft nodes. Among the 5 node graphs, we found irrational eigenvalues for the chain 5 and the cycle 5. In addition, there are the following:

nb. in classification    eigenvalue           eigenvector
5.16                     λ2 = 3 − √2          (−0.27, −0.65, 0, 0.65, 0.27)^T
5.16                     λ4 = 3 + √2          (0.65, −0.27, 0, 0.27, −0.65)^T
5.21                     λ4 = (7 + √5)/2      (−0.6, 0.6, 0.37, 0, −0.37)^T
5.21                     λ5 = (7 − √5)/2      (−0.37, 0.37, −0.6, 0, 0.6)^T
5.24                     λ2 = (5 − √13)/2     (−0.67, −0.2, 0.2, 0.67, 0)^T
5.24                     λ5 = (5 + √13)/2     (−0.2, 0.67, −0.67, 0.2, 0)^T
5.30 (chain 5)           λ4 = (3 + √5)/2      (−0.6, 0.6, 0.37, 0, −0.37)^T
5.30 (chain 5)           λ5 = (3 − √5)/2      (−0.37, 0.37, −0.6, 0, 0.6)^T

Table 2: Non trivial graphs with soft nodes and non integer eigenvalues.

Remarks. The graph 5.16 is 3-soft. The graphs 5.21 and 5.24 are not part of an integer soft class. They are shown in Fig. 28.

Figure 28: The graphs 5.16, 5.21 and 5.24 with their soft node.

Graph 5.16 is a chain 4 with a soft node added. Graph 5.21 is obtained from chain 5 (graph 5.30) by inserting a soft node.
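The chain-5 rows of Table 2 can be reproduced directly. The sketch below is ours and numbers the chain vertices consecutively, which may differ from the labelling of graph 5.30 in the figures; it recovers the irrational eigenvalues (3 ± √5)/2 and shows that the corresponding eigenvectors vanish at the middle vertex.

import numpy as np

# Laplacian of the chain Ch5 (path 0 - 1 - 2 - 3 - 4).
edges = [(0, 1), (1, 2), (2, 3), (3, 4)]
A = np.zeros((5, 5))
for i, j in edges:
    A[i, j] = A[j, i] = 1.0
L = np.diag(A.sum(axis=1)) - A

vals, vecs = np.linalg.eigh(L)
print(np.round(vals, 4))  # contains (3 - sqrt(5))/2 ~ 0.382 and (3 + sqrt(5))/2 ~ 2.618
for lam in [(3 - np.sqrt(5)) / 2, (3 + np.sqrt(5)) / 2]:
    k = int(np.argmin(np.abs(vals - lam)))
    print(round(lam, 4), np.round(vecs[:, k], 2))  # the middle component is 0: vertex 2 is soft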
6.11 Minimal λ soft graphs

We computed the minimal λ soft graphs for λ = 1, . . . , 6. These are presented in Fig. 29.

Figure 29: The minimal λ soft graphs for λ = 1, 2, 3, 4, 5 and 6.

Note that there is a unique minimal λ-soft graph for λ = 1 and 2. There are two minimal 3-soft graphs and 4-soft graphs. There are four minimal 5-soft graphs. The first two are generated by respectively inserting a soft node and adding a soft node to the minimal 4-soft graph. The third and fourth ones are obtained respectively by adding three soft nodes to the 2-clique and adding a soft node to the 4-star. Three systematic ways to generate minimal (λ+1)-soft graphs are (i) inserting a zero into a λ-soft graph, (ii) adding a zero to a λ-soft graph and (iii) adding a matching to a (λ−1)-soft graph. One can therefore systematically generate minimal 7-soft, 8-soft, ... graphs.
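A brute-force search over small graphs is one rough way to reproduce this kind of catalogue. The sketch below is ours and is not the procedure used in the paper; it assumes the working definition suggested by the figures, namely that a λ-soft graph is a connected graph whose Laplacian has eigenvalue λ with an eigenvector vanishing on at least one vertex (a soft node), and it only inspects the eigenvectors returned by the solver, so degenerate eigenspaces may need extra care.

import itertools
import numpy as np

def is_lambda_soft(A, lam, tol=1e-8):
    # True if the Laplacian of adjacency matrix A has eigenvalue lam with a
    # computed eigenvector vanishing at some vertex (a soft node).
    L = np.diag(A.sum(axis=1)) - A
    vals, vecs = np.linalg.eigh(L)
    for k, v in enumerate(vals):
        if abs(v - lam) < tol and np.any(np.abs(vecs[:, k]) < tol):
            return True
    return False

def connected(A):
    # Crude connectivity test: the second smallest Laplacian eigenvalue is positive.
    L = np.diag(A.sum(axis=1)) - A
    return np.linalg.eigvalsh(L)[1] > 1e-8

n, lam = 4, 1.0
pairs = list(itertools.combinations(range(n), 2))
hits = []
for mask in itertools.product([0, 1], repeat=len(pairs)):
    A = np.zeros((n, n))
    for bit, (i, j) in zip(mask, pairs):
        A[i, j] = A[j, i] = float(bit)
    if connected(A) and is_lambda_soft(A, lam):
        hits.append([p for b, p in zip(mask, pairs) if b])
print(len(hits), "connected 1-soft graphs on", n, "labelled vertices")

Sorting the hits by edge count then gives candidates for the minimal graphs of Fig. 29, up to isomorphism checks.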
7 Conclusion

We reviewed families of graphs whose spectrum is known and presented transformations that preserve an eigenvalue. The link, articulation and soldering were contained in Merris [10] and we found two new transformations: the regular expansion and the replacement of a coupling by a square. We also showed transformations that shift an eigenvalue: insertion of a soft node (+1), addition of a soft node (+1), insertion of a matching (+2). The first is new and the second and third were found by Das [11] and Merris [10] respectively. From this appears a landscape of graphs formed by families of λ-graphs connected by these transformations. These structures remain to be understood. We presented the connections between small graphs with up to six vertices. Is it possible to obtain all the λ graphs using a series of elementary transformations? Or just part of these? We partially answered the question of whether one can predict eigenvalues/eigenvectors from the geometry of a graph
by examining the situation of a λ subgraph G of a λ graph G″. We showed that if the remainder graph G′ is λ, it is an articulation or a link of G. If not, and if G and G′ share an eigenvector, the two may be related by adding one or several soft nodes to G′. A number of the graphs we studied have irrational eigenvalues and we can define λ graphs for these as well because the transformations apply. However, we did not find any connection between λ graphs and µ graphs if λ is an integer and µ is irrational.

References

[1] D. Cvetkovic, P. Rowlinson and S. Simic, "An Introduction to the Theory of Graph Spectra", London Mathematical Society Student Texts (No. 75), (2001).

[2] C. Maas, Transportation in graphs and the admittance spectrum, Discrete Applied Mathematics, 16, 31-49, (1987).

[3] J.-G. Caputo, A. Knippel and E. Simo, "Oscillations of simple networks: the role of soft nodes", J. Phys. A: Math. Theor., 46, 035100, (2013).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 46, 035100 (2013) [4] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Caputo, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Knippel, and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Retiere, Spectral solution of load flow equations, Eng.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Express, 025007, (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [5] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Bustamante-Casta˜neda, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Caputo, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Cruz-Pacheco, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Knippel and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Mouatamide, ”Epidemic model on a network: analysis and applications to COVID-19”, Physica A, 564, 125520, (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [6] U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Von Luxburg, A tutorial on spectral clustering Stat Comput, 17: 395–416, (2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [7] Mark Kac, Can One Hear the Shape of a Drum?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=', The American Mathe- matical Monthly, 73, 4P2, 1-23, (1966).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [8] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Mohar, The Laplacian spectrum of graphs, in: Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Alavi, G.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Chartrand, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Oellermann, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Schwenk Wiley (Eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' ), Graph Theory, Combina- torics and Applications, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 2, 1991, 871–898, (1991).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [9] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Biyikoglu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Leydold and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Stadler ”Laplacian Eigenvectors of Graphs”, Springer (2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [10] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Merris, ”Laplacian graph eigenvectors”, Linear Algebra and its Appli- cations, 278, 22l-236, (1998) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [11] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Ch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Das, The Laplacian Spectrum of a Graph, Computers and Mathe- matics with Applications, 48, 715-724, (2004).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [12] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Caputo, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Khames, A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Knippel, ”On graph Laplacians eigenvectors with components in 1,-1,0”, Discrete Applied Mathematics, 269, 120-129, (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' [13] https://en.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='wikipedia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='org/wiki/Rational root theorem [14] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Grone and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Merris, The Laplacian spectrum of a graph, Siam J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Discrete Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' (C) 1994 Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 7, No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 2, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 221-229, May 1994 [15] Thomas Edwards, ”The Discrete Laplacian of a Rectangular Grid”, web document, (2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 8 Appendix A: Graph classification The following tables indicate the graph classification we used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' Each line in the ”connections” column is the connection list of the corresponding graph.' 
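As an aid to reading the tables, here is a minimal sketch for turning a connection list into a Laplacian spectrum. It is not part of the original paper: it assumes Python with NumPy, and the helper names parse_connections and laplacian are purely illustrative. Each token 'ij' in a connection list denotes an edge between vertices i and j.

import numpy as np

def parse_connections(conn):
    # Parse a connection list such as '12 13 23' into 0-based edges.
    # Vertices are numbered 1..n in the tables, so single digits suffice for n <= 6.
    return [(int(t[0]) - 1, int(t[1]) - 1) for t in conn.split()]

def laplacian(n, edges):
    # Combinatorial Laplacian L = D - A of a simple graph on n vertices.
    L = np.zeros((n, n))
    for i, j in edges:
        L[i, i] += 1
        L[j, j] += 1
        L[i, j] -= 1
        L[j, i] -= 1
    return L

# Graph 2 of Table 3: 3 vertices, connections '12 13 23' (the triangle).
L = laplacian(3, parse_connections('12 13 23'))
print(np.round(np.linalg.eigvalsh(L), 6))  # expected: [0. 3. 3.]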
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='classification ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='nodes ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='links ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='connections ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='[1] ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 23 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 23 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 23 24 34 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 23 34 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 23 34 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 23 34 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 23 24 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 23 34 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 23 24 25 34 35 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 23 24 34 35 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 23 42 25 34 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 23 24 34 35 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='14 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 23 24 34 35 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='15 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 15 23 25 34 35 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='16 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 23 34 35 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='17 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 23 25 34 45 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='18 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 23 34 35 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='19 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 23 24 34 35 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 23 34 35 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='21 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 23 34 35 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='22 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 15 23 25 34 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='23 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 23 34 35 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='24 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 23 25 35 34 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='25 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 23 34 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='26 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 23 34 35 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='27 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 23 34 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='28 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 23 34 35 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='29 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='30 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 23 34 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='Table 3: Graphs of less than 5 nodes labelled 1 to 30 in classification [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='41 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='classification ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='nodes ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='links ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='connections ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='[1] ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='15 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 16 23 24 25 26 34 35 36 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='14 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 24 25 26 34 35 36 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 16 23 24 25 26 34 35 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 24 25 26 34 35 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 25 26 34 35 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 16 23 25 34 35 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 24 25 26 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 24 34 35 36 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 24 26 34 35 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 23 24 25 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 16 23 24 26 34 36 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 25 26 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 16 23 24 25 26 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='14 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 25 26 34 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='15 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 16 23 24 34 35 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='16 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 16 23 25 34 35 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='17 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 16 23 26 34 35 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='18 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 16 23 24 26 34 35 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='19 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 23 24 25 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 23 24 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='21 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 16 23 24 25 26 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='22 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} 
+page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 15 16 23 34 35 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='23 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 16 23 25 26 34 35 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='24 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 16 23 25 26 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='25 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 16 23 34 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='26 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 16 23 34 35 36 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='27 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 16 23 26 34 35 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='28 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 16 23 24 34 35 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='29 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 16 23 24 26 34 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='30 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 61 23 24 25 34 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='Table 4: 6 node graphs labelled 1 to 30 in classification [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='42 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='classification ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='nodes ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='links ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='connections ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='[1] ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='31 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 16 23 24 26 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='32 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='10 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 16 23 25 26 34 36 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='33 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 15 23 24 25 34 35 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='34 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 23 24 25 34 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='35 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 23 24 34 45 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='36 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 23 24 34 45 46 56 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='37 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 13 14 15 16 24 34 45 46 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='38 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 14 15 23 25 34 
35 45 56
39  6  9  12 13 15 23 24 25 34 45 56
40  6  9  12 13 16 23 34 35 36 46 56
41  6  9  12 16 23 34 35 36 45 46 56
42  6  9  12 16 23 24 26 34 45 46 56
43  6  9  12 13 16 23 34 35 36 45 56
44  6  9  12 13 16 23 34 36 45 46 56
45  6  9  12 16 23 25 34 35 36 45 56
46  6  9  12 13 15 16 23 34 36 45 56
47  6  9  12 13 16 23 25 26 34 45 56
48  6  9  12 15 16 23 26 34 35 45 56
49  6  9  12 15 16 23 24 26 35 45 56
50  6  9  12 14 15 16 23 34 36 45 56
51  6  9  12 15 24 16 23 34 36 45 56
52  6  9  12 14 16 23 25 34 36 45 56
53  6  8  12 13 14 23 24 34 45 46
54  6  8  12 13 14 23 24 25 34 36
55  6  8  12 13 14 23 24 34 45 56
56  6  8  13 15 23 25 34 35 45 56
57  6  8  12 14 23 24 25 34 45 56
58  6  8  12 15 23 25 34 35 45 56
59  6  8  12 13 15 23 34 35 45 56
60  6  8  12 14 15 23 24 34 45 56
Table 5: 6 node graphs labelled 31 to 60 in classification [1].
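In Table 5 and the similar tables that follow, the connections column appears to list each edge as a two-digit pair of node labels (so 12 is the edge joining nodes 1 and 2), and the number of pairs matches the links column. A minimal sketch of turning such a row into an adjacency matrix, assuming that reading of the encoding; the helper name and the example check below are illustrative, not taken from [1]:

import numpy as np

def parse_connections(conn: str, n_nodes: int) -> np.ndarray:
    """Adjacency matrix from a row of two-digit edge codes such as '12 23 34 45 56'."""
    adj = np.zeros((n_nodes, n_nodes), dtype=int)
    for code in conn.split():
        i, j = int(code[0]) - 1, int(code[1]) - 1   # node labels in the tables are 1-based
        adj[i, j] = adj[j, i] = 1
    return adj

# Graph 60 above: 6 nodes, 8 links, connections 12 14 15 23 24 34 45 56.
adj = parse_connections("12 14 15 23 24 34 45 56", 6)
assert adj.sum() // 2 == 8   # edge count agrees with the links column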
classification [1]  nodes  links  connections
61  6  8  12 13 14 15 16 23 45 46
62  6  8  12 14 23 42 34 35 36 56
63  6  8  12 14 15 23 25 34 45 56
64  6  8  12 13 15 23 25 34 45 56
65  6  8  12 13 15 23 24 34 45 56
66  6  8  12 16 24 34 36 45 56 46
67  6  8  12 13 16 23 34 36 45 56
68  6  8  12 13 16 23 34 35 45 56
69  6  8  12 15 16 23 26 34 45 56
70  6  8  12 13 16 23 34 45 46 56
71  6  8  12 13 16 23 34 35 46 56
72  6  8  12 15 16 23 34 36 45 56
73  6  8  12 15 23 24 26 35 45 56
74  6  8  12 14 16 23 25 34 45 56
75  6  7  12 13 14 23 34 35 36
76  6  7  12 23 24 25 34 45 46
77  6  7  12 14 23 24 25 34 36
78  6  7  12 14 23 24 34 35 36
79  6  7  12 13 23 34 36 35 45
80  6  7  12 23 25 34 35 45 46
81  6  7  12 13 14 23 34 35 56
82  6  7  12 14 23 24 34 35 56
83  6  7  12 13 23 34 35 45 46
84  6  7  12 13 23 34 45 46 56
85  6  7  12 15 23 24 34 45 46
86  6  7  12 13 15 23 34 45 46
87  6  7  12 13 15 23 34 45 46
87B  6  7  12 13 15 24 34 45 56
88  6  7  12 14 23 34 35 36 56
89  6  7  12 15 16 23 34 45 56
90  6  7  13 15 23 25 34 45 56
Table 6: 6 node graphs labelled 61 to 90 in classification [1]. Note that 87B is absent from [1].

classification [1]  nodes  links  connections
91  6  7  12 14 23 25 34 45 56
92  6  7  12 16 23 34 36 45 56
93  6  7  12 15 23 34 36 45 56
94  6  6  12 13 23 34 35 36
95  6  6  13 23 34 35 45 56
96  6  6  12 23 25 34 35 56
97  6  6  12 13 23 34 35 56
98  6  6  12 23 35 34 45 56
99  6  6  12 23 24 45 46 56
100  6  6  12 23 34 45 46 56
101  6  6  12 14 23 34 35 36
102  6  6  12 14 23 25 34 36
103  6  6  12 23 42 35 45 56
103  6  6  12 14 23 34 35 56
105  6  6  12 15 23 34 45 46
106  6  6  12 16 23 34 45 56
107  6  5  16 26 36 46 56
108  6  5  14 24 34 45 56
109  6  5  13 23 34 45 46
110  6  5  12 23 34 36 45
111  6  5  12 23 34 45 46
112  6  5  12 23 34 45 56
Table 7: 6 node graphs labelled 91 to 112 in classification [1].

9 Appendix B: sets 1s, 2s, 3s, 4s and 5s

We give here the tables for the sets 1s, 2s, 3s, 4s and 5s for 5 node graphs and 6 node graphs. The numbering of the graphs follows the one given by Cvetkovic [1] for graphs with 5 or fewer nodes and for 6 node graphs respectively.
9.1 1s

nodes  links  classification [1]  eigenvector  connection
3  2  3   (−1, 0, 1)
4  3  8   (0, 0, −1, 1)
4  4  6   (1, 1, 0, −2)  expansion on 5.3
5  4  28  (1, 1, 0, −1, −1)
5  4  28  (1, 0, 0, 0, −1)  articulation on 5.3
5  4  28  (1, 1, 1, 0, −3)  star 4
5  4  29  (0, 1, −1, 0, 0)  articulation on 5.3
5  5  23  (0, 0, 0, 1, −1)  articulation on 5.3
5  5  23  (1, 1, 0, −2, 0)  expansion on 5.3
5  6  18  (1, 1, 0, 1, −3)
5  6  20  (1, 1, 0, −1, −1)  articulation on 5.28
5  7  14  (1, 1, 0, 1, −3)
Table 8: Five node graphs with soft nodes and eigenvalue 1.

nodes  links  classification [1]  eigenvector  connection
6  11  10   (1, 1, 1, 1, 0, −4)  link on 19
6  10  19   (1, 1, 1, 1, 0, −4)  link on 33
6  9   33   (1, 1, 1, 1, 0, −4)  link on 38
6  9   36   (2, 2, 2, 0, −3, −3)  link on 61
6  9   38   (1, 1, 1, 1, 0, −4)  link 58
6  9   53   (0, 0, 0, 0, 1, −1)  link 75
6  9   53   (1, 1, 1, 0, −3, 0)  link 75
6  8   56   (1, 1, 1, 1, 0, −4)  expansion on 5.3
6  8   58   (1, 1, 1, 1, 0, −4)  expansion on 5.3
6  8   61   (2, 2, 2, 0, −3, −3)  link on 94
6  7   75   (0, 0, 0, 0, 1, −1)  articulation on 5.3
6  7   75   (1, 1, 0, 1, −3, 0)  link on 101
6  7   78   (0, 0, 0, 0, 1, −1)  link on 101
6  7   79   (1, 1, 0, 0, 0, −2)  expansion on 5.3
6  7   79   (1, 1, 0, −1, −1, 0)  link on 94
6  7   92   (1, 1, 0, −1, −1, 0)  link on 106
6  6   94   (1, 1, 0, −2, 0, 0)  link on 107
6  6   94   (3, 3, 0, −2, −2, −2)  link on 107
6  6   94   (1, −1, 0, 0, 0, 0)  link on 95
6  6   95   (1, −1, 0, 0, 0, 0)  articulation on 5.3
6  6   97   (1, 1, 0, −2, 0, 0)  link on 108
6  6   99   (1, 0, −1, 0, 0, 0)  articulation on 5.3
6  6   101  (0, 0, 0, 0, 1, −1)  articulation on 5.3
6  6   106  (0, 1, 1, 0, −1, −1)  link between two 5.3
6  5   107  (−1, 1, 0, 0, 0, 0)  articulation on 5.3
6  5   107  (1, 1, −1, −1, 0, 0)  soldering two 5.3 and articulation
6  5   107  (1, 1, 0, −2, 0, 0)  articulation on 5.3
6  5   108  (−1, 1, 0, 0, 0, 0)  articulation on 5.3
6  5   108  (−1, 0, 1, 0, 0, 0)  articulation on 5.3
6  5   109  (−1, 1, 0, 0, 0, 0)  articulation on 5.3
6  5   109  (0, 0, 0, 0, −1, 1)  articulation on 5.3
6  5   111  (0, 0, 0, 0, −1, 1)  articulation on 5.3
Table 9: Six node graphs with soft nodes and eigenvalue 1.
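Each row of Tables 8 and 9 pairs a graph from the classification with an eigenvector, the zero entries of which appear to mark the soft nodes. Assuming these are eigenvectors of the graph Laplacian L = D − A (an assumption made only for this sketch), the first entry of Table 8 can be checked in a few lines; the only 3 node graph with 2 links is the chain, so its edges are 12 and 23:

import numpy as np

# 3 node, 2 link graph (classification 3 in [1]): the chain with edges 12 and 23.
A = np.zeros((3, 3))
for i, j in [(0, 1), (1, 2)]:
    A[i, j] = A[j, i] = 1
L = np.diag(A.sum(axis=1)) - A      # graph Laplacian D - A (assumed convention)

v = np.array([-1.0, 0.0, 1.0])      # eigenvector listed in Table 8
print(L @ v)                        # [-1.  0.  1.], i.e. L v = 1 * v, eigenvalue 1
# The zero entry at the middle node is the soft node.

The same check can be repeated for the 6 node rows by taking the edge list of the corresponding classification number from Tables 5 to 7.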
9.2 2s

nodes  links  classification [1]  eigenvector  connection
4  5  5   (1, 0, −1, 0)  link on 5.7
4  4  7   (1, 0, −1, 0)
4  4  7   (0, 1, 0, −1)
5  8  12  (1, 0, −2, 0, 1)  link on 5.17
5  7  15  (1, 0, −1, 0, 0)  articulation on 5.7
5  7  15  (1, 0, 1, 0, −2)  link on 5.17
5  7  17  (1, 0, −2, 0, 1)
5  6  18  (0, 1, 0, −1, 0)  articulation 5.7
5  6  22  (0, 1, 0, −1, 0)  add a zero to 5.3 and articulation
5  6  22  (1, 0, 0, −1, 0)  add a zero to 5.3 and articulation
5  5  26  (0, 1, 0, −1, 0)  articulation 5.7
Table 10: Five node graphs with soft nodes and eigenvalue 2.

nodes  links  classification [1]  eigenvector  connection
6  12  5    (1, 1, 0, −3, 0, 1)
6  11  11   (1, 1, 1, 0, −3, 0)
6  11  13   (1, 0, −1, −1, 0, 1)
6  11  14   (1, 1, 0, −3, 0, 1)
6  10  21   (1, 0, −1, −1, 0, 1)
6  10  21   (0, 0, 1, −1, 0, 0)
6  10  29   (1, 1, 1, 0, −3, 0)
6  10  31   (1, 0, −1, −1, 0, 1)  link on 37
6  9   33   (−2, 0, 1, 1, 0, 0)  link on 56
6  9   37   (1, 0, 0, 0, 0, −1)  link on 73
6  9   37   (0, 0, 0, −1, 0, 1)  link on 73
6  9   37   (1, 0, 1, −1, 0, −1)  link on 73
6  9   40   (0, 0, 0, 0, 1, −1)  addition of a 0 to 5.3, articulation
6  9   49   (0, 0, 1, −1, 0, 0)  addition of a 0 to 5.3, articulation
6  9   49   (1, 0, −1, −1, 0, 1)  link on 69
6  8   56   (−1, 1, 0, 0, 0, 0)  addition of a 0 to 5.3, articulation
6  8   56   (1, 1, 0, −2, 0, 0)  link on 64
6  8   57   (−1, 0, 1, 0, 0, 0)  link 93
6  8   61   (0, 0, 0, 0, −1, 1)  addition of a 0 to 5.3, articulation
6  8   64   (1, 1, 0, −2, 0, 0)  expansion of 5.7
6  8   66   (0, 0, 1, 0, −1, 0)  addition of a 0 to 5.3
6  8   69   (0, 1, 1, −1, −1, 0)  link 5.7 and 5.3
6  8   71   (0, 0, 0, 1, −1, 0)  addition of a 0 to 5.3
6  8   73   (1, 0, 0, 0, 0, −1)  addition of a 0 to 5.3
6  8   73   (0, 0, 0, −1, 0, 1)  addition of a 0 to 5.3
6  8   73   (1, 0, 1, −1, 0, −1)  soldering two 5.7
6  8   74   (0, −1, 0, 1, 0, 0)  link and articulation 5.7
6  7   75   (0, −1, 0, 1, 0, 0)  link 101
6  7   76   (0, 0, −1, 0, 1, 0)  link 103
6  7   81   (0, −1, 0, 1, 0, 0)  link and articulation 5.7
6  7   82   (1, 0, −1, 0, −1, 1)  link 104
6  7   88   (0, −1, 0, 1, 0, 0)  articulation on 5.7
6  7   90   (1, 0, 0, −1, 0, 0)  articulation on 5.7
6  7   90   (1, −1, 0, 0, 0, 0)  articulation on 5.7
6  7   91   (1, 0, −1, 0, 0, 0)  addition of a 0 to 5.3
6  7   92   (−1, 1, 1, 1, −1, −1)  link on 5.1
6  7   93   (0, 0, 0, 1, 0, −1)  addition of a 0 to 5.3, articulation
6  7   93   (1, −1, −1, 0, 1, 0)
6  6   101  (0, 1, 0, −1, 0, 0)  addition of a 0 to 5.3, articulation
6  6   103  (0, 0, 1, −1, 0, 0)  addition of a 0 to 5.3, articulation
6  6   104  (0, 1, 0, −1, 0, 0)  addition of a 0 to 5.3, articulation
6  6   104  (1, 0, −1, 0, −1, 1)  articulation on 5.7
Table 11: Six node graphs with soft nodes and eigenvalue 2.

9.3 3s

nodes  links  classification [1]  eigenvector  connection
3  3  2   (−1, 1, 0)
4  4  6   (−1, 1, 0, 0)  articulation 5.3
5  7  11  (0, −1, 0, 0, 1)  articulation 5.3
5  6  13  (−1, 0, 0, −1, 0)  articulation 5.3
5  8  13  (0, 1, 0, 0, −1)  articulation 5.3
5  7  16  (−1, 1, 0, −1, 1)  addition of zero to chain 4
5  7  17  (0, 1, 0, −1, 0)  articulation 5.3
5  6  20  (−1, 1, 0, 0, 0)  articulation 5.3
5  6  20  (0, 0, 0, −1, 1)  articulation 5.3
5  6  22  (0, 0, −1, 0, 1)  articulation 5.3
5  5  23  (−1, 1, 0, 0, 0)  articulation 5.3
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 5 5 25 (−1, 1, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 Table 12: Five node graphs with soft nodes and eigenvalue 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 50 nodes links classification [1] eigenvector connection 6 13 3 (−1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1) link 8 6 12 6 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) link 11 6 12 6 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 2) link 8 6 12 8 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1) link 11 6 11 11 (1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) link 16 6 11 16 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) link 25 6 11 16 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 2) link 17 6 11 17 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) link 52 6 11 17 (1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1) link 52 6 10 19 (1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) link 39 6 10 25 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1) 6 10 29 (1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) 6 10 32 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) link 52 6 10 32 (1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0) link 52 6 9 36 (0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' −1) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 9 38 (1, 0, −1, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 6 9 38 (0, 1, 0, −1, 0, 0) link 63 6 9 39 (1, 0, 0, −1, 0, 0) link 63 6 9 51 (1, 1, 0, −1, −1, 0) link 106 6 9 51 (0, 0, 1, −1, 1, −1) link 106 6 9 52 (0, 1, 0, 1, 0, −2) 6 9 52 (1, 0, 1, 0, −2, 0) 6 9 52 (1, 0, −1, 0, 0, 0) link 106 6 9 52 (0, 1, 0, −1, 0, 0) link 106 6 8 58 (−1, 1, 1, −1, 0, 0) link 79 6 8 61 (0, −1, 1, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 8 62 (0, 0, 0, 0, −1, 1) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 8 63 (0, 1, 0, −1, 0, 0) link 91 6 8 70 (0, −1, 1, 1, −1, 0) link 106 6 8 70 (−1, 0, 1, 1, 0, −1) link 106 6 8 74 (−1, 0, 1, −1, 0, 1) link 106 6 8 74 (0, 0, 0, −1, 0, 1) link 106 6 8 74 (−1, 0, 1, 0, 0, 0) link 106 6 7 79 (−1, 1, 0, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 7 79 (0, 0, 0, −1, 1, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 7 83 (−1, 1, 0, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 7 84 (−1, 0, 1, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 7 84 (0, 0, 0, −1, 0, 1) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 7 88 (0, 0, 0, 0, −1, 1) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 7 91 (0, −1, 0, 1, 0, 0) 6 7 92 (1, −1, 0, 1, −1, 0) link 106 6 7 92 (0, 1, −1, 0, 1, −1) link 106 6 6 94 (0, 0, 0, 0, 1, −1) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 6 97 (0, 0, 0, 0, 1, −1) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 6 99 (1, −2, 1, 2, −2, 0) soldering P3 and C3 6 6 99 (0, 0, 0, 0, 1, −1) articulation 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 6 100 (1, −2, 1, 1, −1, 0) soldering P3 and C3 6 6 100 (0, 0, 0, 0, 1, −1) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 6 106 (−1, 0, 1, −1, 0, 1) cycle 6 6 6 106 (−1, 1, 0, −1, 1, 0) cycle 6 Table 13: Six node graphs with soft nodes and eigenvalue 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 51 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 4s nodes links classification [1] eigenvector connection 4 5 5 (1, −2, 1, 0) 5 8 12 (1, 0, 0, 0, −1) link on 17 5 7 14 (0, 1, 0, −1, 0) 5 7 14 (−1, 0, 0, 1, 0) link on 19 5 7 17 (−1, 0, 0, 0, 1) 5 7 18 (−2, 1, 0, 1, 0) articulation on 5 5 7 19 (0, 1, 0, −1, 0) Table 14: Five node graphs with soft nodes and eigenvalue 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 52 nodes links classification [1] eigenvector connection 6 10 9 (0, 0, −1, 1, 0, 0) 6 10 9 (0, 0, −1, 1, 0, 0) 6 10 9 (0, 0, −1, 1, 0, 0) 6 10 13 (0, 0, −1, 1, 0, 0) 6 10 13 (−1, 0, 0, 0, 0, 1) 6 10 14 (0, 0, −1, 0, 1, 0) 6 10 16 (−1, 0, 1, 0, 0, 0) 6 10 18 (−1, 0, −1, 1, 0, 1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='31 6 10 18 (1, 0, 0, 0, 0, −1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='31 6 10 21 (1, 0, 0, 0, 0, −1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='31 6 10 24 (1, 0, 0, 0, 0, −1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='31 6 10 29 (0, 0, 0, 1, 0, −1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='41 6 9 31 (−1, 0, −1, 1, 0, 1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='31 6 9 31 (0, −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6586, −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2574, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2574, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6586, 0) 6 9 31 (1, 0, 0, 0, 0, −1) 6 9 33 (0, 0, −1, 1, 0, 0) 6 9 35 (0, −1, 1, 0, 0, 0) 6 9 36 (0, −1, 1, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='53 6 9 36 (0, 1, −1, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='53 6 9 41 (0, 0, 0, 1, 0, −1) link 6.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='48 6 9 48 (1, −1, 1, 0, −1, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='93 6 9 48 (0, 0, 0, 1, 0, −1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='49 6 9 49 (0, −1, 0, 0, 1, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='78 6 9 49 (−1, 0, 0, 0, 0, −1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='78 6 8 53 (−1, 1, 0, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='78 6 8 53 (0, 1, −1, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='78 6 8 53 (0, −1, 1, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 6 8 55 (−1, 0, 1, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 6 8 55 (0, −1, 1, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 6 8 61 (0, 0, 0 − 2, 1, 1) 6 8 62 (−1, 1, 0, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='78 6 8 64 (−1, 1, 0, 0, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='17 6 8 65 (0, −1, 1, 0, 0, 0) 6 8 69 1 arbitrary zero link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='93 6 8 71 (1, −1, 1, 0, −1, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='93 6 8 73 (0, 1, 0, 0, −1, 0) soldering 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 6 7 75 (−2, 1, 0, 1, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='18 6 7 78 (0, −1, 0, 1, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 6 7 80 (0, 0, −1, 0, 1, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 6 7 81 (−2, 1, 0, 1, 0, 0) articulation 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='18 6 7 82 (0, −1, 0, 1, 0, 0) articulation 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='19 6 7 93 (1, −1, 1, 0, −1, 0) Table 15: Six node graphs with soft nodes and eigenvalue 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 53 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='5 5s nodes links classification [1] eigenvector connection 5 10 10 (1, −1, 0, 0, 0) 5 10 10 (0, 1, −1, 0, 0) 5 10 10 (0, 0, 1, −1, 0) 5 10 10 (0, 0, 0, 1, −1) link on 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='11 5 9 11 (1, 0, −1, 0, 0) link on 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 5 9 11 (0, 0, 1, −1, 0) 5 8 12 (0, 1, 0, −1, 0) link 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='15 5 8 13 (1, 0, −2, 1, 0) add 2 soft nodes to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 5 7 15 (1, 1, −3, 1, 0) add soft node to 4 star 5 7 15 (0, 0, −1, 1, 0) add 3 soft nodes to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 Table 16: Five node graphs with soft nodes and eigenvalue 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 54 nodes links classification [1] eigenvector connection 6 13 3 (1, 0, 0, 0, 0, −1) 6 12 5 (−1, 1, 0, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 6 12 5 (1, 1, 0, 0, 0, −2) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='14 6 12 8 (0, 0, 0, 0, 1, −1) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='12 6 12 8 (1, 0, 0, −1, 0, 0) 6 11 10 (1, −1, 0, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='19 6 11 10 (−1, 1, −1, 1, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='19 6 11 10 (1, −1, −1, 1, 0, 0) 6 11 11 (1, 1, 0, 0, 0, −2) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='14 6 11 12 (1, −1, 0, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='14 6 11 14 (1, 1, 0, 0, 0, −2) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='29 6 11 14 (1, −1, 0, 0, 0, 0) 6 11 17 (−1, 0, 0, 1, −1, 1) inserting a matching between two 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 11 17 (0, 0, 0, 1, −1, 0) add 3 soft nodes to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 6 10 19 (−1, 1, 1, −1, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='30 6 10 19 (0, 1, −1, 0, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='19 6 10 20 (1, 0, −2, 1, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='33 6 10 20 (0, 0, −1, 1, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='35 6 10 23 (0, 1, 0, 0, −1, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='32 6 10 23 (−1, 0, −1, 1, 0, 1) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='30 6 10 26 (0, 0, 0, 1, 0, −1) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='32 6 10 29 (1, −2, 1, 0, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='32 6 10 30 (1, −1, 0, 1, −1, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='70 6 10 32 (0, 1, 0, 0, 0, −1) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='32 6 9 33 (1, −3, 1, 1, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='56 6 9 34 (0, 1, 0, −1, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='45 6 9 35 (1, 0, 0, −1, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='45 6 9 38 (1, −1, 1, −1, 0, 0) articulation on 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 6 9 39 (1, −2, 0, 1, 0, 0) articulation on 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='13 6 9 44 (−1, 0, 1, −1, 0, 1) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='70 6 9 45 (0, 0, 1, −1, 0, 0) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='57 6 9 51 (0, 0, 1, −1, 1, −1) link on 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='70 6 8 56 (1, 1, −3, 1, 0, 0) articulation on 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='15 6 8 57 (0, −1, 0, 1, 0, 0) articulation on 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='15 6 8 70 (−1, 0, 1, −1, 0, 1) insert matching on cycle 3 Table 17: Six node graphs with soft nodes and eigenvalue 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content=' 55 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 6s nodes links classification [1] eigenvector connection 6 13 1 (1, −1, 0, 0, 0, 0) 6 13 1 (1, 0, −1, 0, 0, 0) 6 13 1 (1, 0, 0, −1, 0, 0) 6 13 1 (1, 1, −2, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='3 6 13 2 (1, −1, 0, 0, 0, 0) 6 13 2 (1, 0, −1, 0, 0, 0) 6 13 2 (1, 0, 0, −1, 0, 0) 6 13 3 (1, −2, 0, 1, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 6 12 3 (0, 1, 0, 0, −1, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='4 6 12 4 (1, −2, 0, 1, 0, 0) 6 12 4 (0, 1, 0, 0, −1, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='6 6 12 5 (0, 0, 1, 0, −1, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 6 12 6 (1, 1, 0, 0, 0, −2) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='16 6 12 6 (1, 0, −1, 0, 0, 0) link 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 6 12 7 (0, 1, 0, 0, −1, 0) add four soft nodes to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 6 12 8 1 arbitrarily placed 0 6 11 9 (1, 1, 0, −1, 1, 0) add two soft nodes to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='7 6 11 11 (1, 1, 1, −4, 1, 0) add a soft node to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='28 6 11 13 1 arbitrarily placed 0 6 11 16 (1, 0, 1, 0, −2, 0) add 3 soft nodes to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='2 6 9 37 (1, 0, 0, −1, 0, 0) add 4 soft node to 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/m9E_T4oBgHgl3EQf7Ry5/content/2301.08369v1.pdf'} +page_content='1 Table 18: Six node graphs with soft nodes and eigenvalue 6.' 
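Rows of these catalogues can be checked numerically. The short sketch below is a non-authoritative illustration, assuming the listed eigenvectors refer to the graph Laplacian; it verifies the "cycle 6" entry of Table 13, where the 6-cycle has eigenvalue 3 with eigenvector (-1, 0, 1, -1, 0, 1), the zero entries being the soft nodes.

```python
import numpy as np

# Laplacian of the 6-cycle ("cycle 6" row of Table 13): degree 2 on the diagonal,
# -1 for each of the 6 links.
n = 6
L = 2 * np.eye(n)
for i in range(n):
    L[i, (i + 1) % n] = L[(i + 1) % n, i] = -1

v = np.array([-1, 0, 1, -1, 0, 1], dtype=float)
print(L @ v)                 # equals 3 * v, i.e. eigenvalue 3
print(np.where(v == 0)[0])   # the two "soft" nodes, where the eigenvector vanishes
```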
diff --git a/mNE2T4oBgHgl3EQfeQdZ/content/tmp_files/2301.03914v1.pdf.txt b/mNE2T4oBgHgl3EQfeQdZ/content/tmp_files/2301.03914v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..06a38297eeaab40989d36eed0ad48e31a482d71c
--- /dev/null
+++ b/mNE2T4oBgHgl3EQfeQdZ/content/tmp_files/2301.03914v1.pdf.txt
@@ -0,0 +1,860 @@
Learning with minimal effort: leveraging in silico labeling for cell and nucleus segmentation

Thomas Bonte1,2,3, Maxence Philbert1,2,3, Emeline Coleno4, Edouard Bertrand4, Arthur Imbert1,2,3,5, and Thomas Walter1,2,3,5[0000-0001-7419-7879]
1 Centre for Computational Biology (CBIO), Mines Paris, PSL University, 75006 Paris, France {Thomas.Bonte, Thomas.Walter}@minesparis.psl.eu
2 Institut Curie, PSL University, 75248 Paris Cedex, France
3 INSERM, U900, 75248 Paris Cedex, France
4 IGH, University of Montpellier, CNRS, 34090 Montpellier, France
5 corresponding authors

Abstract. Deep learning provides us with powerful methods to perform nucleus or cell segmentation with unprecedented quality. However, these methods usually require large training sets of manually annotated images, which are tedious and expensive to generate. In this paper we propose to use In Silico Labeling (ISL) as a pretraining scheme for segmentation tasks. The strategy is to acquire label-free microscopy images (such as bright-field or phase contrast) alongside fluorescently labeled images (such as DAPI or CellMask™). We then train a model to predict the fluorescently labeled images from the label-free microscopy images. By comparing segmentation performance across several training set sizes, we show that such a scheme can dramatically reduce the number of required annotations.

Keywords: Segmentation · Transfer learning · Pretext task · In Silico Labeling · Fluorescence microscopy

1 Introduction

Detection and segmentation of cells and nuclei, among other cell structures, are essential steps for microscopy image analysis. Deep Learning has provided us with very powerful methods to perform these segmentation tasks. In particular, recently published neural networks, such as NucleAIzer [1], Cellpose [2] or StarDist [3], trained on hundreds of images of different modalities, give excellent results, outperforming by far traditional methods for image segmentation. However, the main drawback of state-of-the-art networks is the need for large amounts of fully annotated ground truth images, which can take a significant amount of time to create. Here, we present an alternative strategy, where we pretrain our segmentation models using In Silico Labeling (ISL) before fine-tuning them on a very small data set to perform nucleus and cell segmentation.

ISL was first introduced by [4], aiming to predict fluorescent labels from bright-field inputs. Fluorescence microscopy is the major technique employed in cellular image-based assays, as the use of fluorescence labels allows particular structures or phenotypic cell states to be highlighted. However, the number of fluorescent labels is limited (typically up to 4). In addition, phototoxicity and photobleaching can also represent serious drawbacks.
To tackle these limitations, several variants have been proposed. In [5], ISL is applied to predict fluorescent labels from transmitted-light images (DIC), or immunofluorescence from electron micrographs. Besides, Generative Adversarial Networks (GAN) are used in [6] to predict different stains: H&E, Jones Silver or Masson's trichrome. They highlight staining standardization as an advantage of ISL. In another paper [7], GANs are also used on a different kind of transmitted light image: quantitative phase images (QPI). Moreover, in [8] conditional GANs (cGAN) generate H&E, PSR and Orcein stained images from unstained bright-field inputs. In [9], using the same data set and the same tasks as [4], the authors add attention blocks to capture more information than usual convolutions. Finally, stained images of human sperm cells are generated in [10] from quantitative phase images. They use these virtually stained images to recognize normal from abnormal cells. The principle of ISL has also been proposed for experimental ground truth generation for training cell classifiers for the recognition of dead cells [11, 12], tumour cells [13], embryo polarization [14] or the cell cycle phase [15].

In this paper we show that models trained to generate fluorescence microscopy images with nuclear or cytoplasmic markers can be used efficiently to pretrain segmentation networks for nuclear and cell segmentation, respectively. To the best of our knowledge, no previous work has used ISL as a pretext task for segmentation of cell structures. This provides us with a powerful strategy to minimize the annotation burden for a given application, and to train models on large data sets, requiring only minimal effort in terms of manual annotation.

2 Materials and Methods

2.1 Image Acquisition

We work on two different data sets. The first data set has been generated by the Opera Phenix™ Plus High-Content Screening System (Perkin Elmer). It contains 960 images of dimension (2160, 2160). For each position, we acquired bright-field images and DAPI, both at 4 different focal planes. DAPI is a very common fluorescent stain binding to AT-rich regions of the DNA, which can thus be used to locate the nucleus in eukaryotic cells. Additionally, we have a phase contrast image, computationally created from the 4 bright-field images by a proprietary algorithm of the Opera system. Images contain on average 15.6±19.6 cells.

Our second data set contains 100 images of dimension (1024, 1024). We used Differential Interference Contrast (DIC) as label-free microscopy technique, and we marked the cytoplasmic membrane with the CellMask™ marker (Life Technologies). Images contain on average 52.4±15.1 cells.

2.2 Nucleus Segmentation

Nucleus segmentation is one of the most important segmentation tasks in biology, as nuclear morphologies are indicative of cellular states, and because nuclei are visually very different from the cytoplasm. Segmentation of the nucleus is usually a comparatively simple segmentation task, and for this reason we assumed that this might be a good first segmentation problem to investigate our ISL-based pretraining.

DAPI prediction as pretraining task. The first step of our strategy for nucleus segmentation is the prediction of DAPI images from bright-field inputs. We used a data set of 421 images of dimension (2160, 2160), divided into 384 images for training and 37 images for testing. 5 images of dimension (512, 512) were randomly cropped from each initial image (see Fig. 1). Note that we only included images containing at least one nucleus.

Inspired by the work of [4], the model is a U-net-shaped model [16] with a densenet121 architecture [17]. It has been previously trained on ImageNet [18], hence it is referred to as 'on steroids' in the following. As input we used 3 channels, 2 being bright-field images of the same field-of-view with different focal planes, and the third the corresponding phase contrast image. As output we used only one channel, the maximum intensity projection of our DAPI images (z-stack, 4 focal planes) that we have for each field-of-view.

We did not use any data augmentation. All training details are reported in Supplementary Table 1.
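For concreteness, a minimal sketch of this pretext task is given below. It assumes the segmentation_models_pytorch package cited as [17]; tile size, the L1 loss and the input/output channel layout follow the text and Supplementary Table 1, while the random arrays are only stand-ins for the acquired images.

```python
import numpy as np
import torch
import segmentation_models_pytorch as smp

# U-net with a densenet121 encoder pretrained on ImageNet (the "on steroids" model).
model = smp.Unet(
    encoder_name="densenet121",
    encoder_weights="imagenet",
    in_channels=3,   # two bright-field focal planes + the phase contrast image
    classes=1,       # predicted DAPI channel
)

# Placeholder arrays standing in for one (512, 512) training crop.
bf = np.random.rand(2, 512, 512).astype(np.float32)          # two bright-field planes
pc = np.random.rand(1, 512, 512).astype(np.float32)          # phase contrast
dapi_stack = np.random.rand(4, 512, 512).astype(np.float32)  # DAPI z-stack (4 planes)

x = torch.from_numpy(np.concatenate([bf, pc], axis=0))[None]       # (1, 3, 512, 512)
y = torch.from_numpy(dapi_stack.max(axis=0, keepdims=True))[None]  # maximum intensity projection

pred = model(x)
loss = torch.nn.functional.l1_loss(pred, y)   # L1 loss, as in Supplementary Table 1
loss.backward()
```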
Fig. 1: Images from the same field-of-view, for a given focal plane. (a) Bright-field image. (b) Phase contrast image, computationally generated by the Opera system. (c) Fluorescent DAPI image.

Transfer Learning for Nucleus Segmentation. In a first step, we aimed at investigating how pretraining on fluorescent markers impacts semantic segmentation. For this, we turned to nucleus segmentation.

In order to generate the ground truth, we applied Cellpose [2], a widely used segmentation technique in bioimaging, based on a U-net-shaped network and trained on massive amounts of heterogeneous data. We applied Cellpose to the DAPI channel and corrected the segmentation results manually. As segmentation of nuclei from high-resolution DAPI images is a fairly simple task, the results were, as expected, overall excellent.

Next, we used training sets with different sizes N ∈ {1, 10, 50, 100, 200, 500}, composed of images of dimension (2160, 2160), and evaluated the accuracy for each N. Testing is always performed on the same withheld 190 images. 5 images of dimension (512, 512) were randomly cropped from each initial image.

To investigate whether our pretraining scheme is useful for segmentation, we compare two different models. The first model is composed of the U-net 'on steroids' followed by a sigmoid activation function in order to output, for each pixel, its probability of belonging to a nucleus (Fig. 2a). The second model has the same U-net architecture but is pretrained on DAPI images, and uses the activation function displayed in equation (1), which takes a different output range into account (Fig. 2b). The reason for this choice is that the model pretrained on DAPI images is likely to output values between 0 and 1, so we centered the activation function around 0.5:

f(x) = 1 / (1 + exp(-(x - 0.5)))    (1)

We did not use any data augmentation. All training details are reported in Supplementary Table 1.

Fig. 2: Compared models to predict nucleus semantic segmentation. (a) U-net 'on steroids' which has not been trained on DAPI images. (b) U-net 'on steroids' pretrained on DAPI images. Note the difference in the activation functions.
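A minimal sketch of the two compared models is shown below. It is an illustration under stated assumptions, not the released implementation: the checkpoint path is hypothetical, and only the architecture, the ImageNet weights and the shifted sigmoid of Eq. (1) come from the text.

```python
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp

# Baseline: ImageNet-pretrained U-net "on steroids" followed by a standard sigmoid.
baseline = nn.Sequential(
    smp.Unet("densenet121", encoder_weights="imagenet", in_channels=3, classes=1),
    nn.Sigmoid(),
)

# ISL-pretrained variant: same architecture, weights taken from the DAPI-prediction model,
# followed by the sigmoid of Eq. (1), centred at 0.5.
class ShiftedSigmoid(nn.Module):
    def forward(self, x):
        return torch.sigmoid(x - 0.5)

isl_unet = smp.Unet("densenet121", encoder_weights=None, in_channels=3, classes=1)
# isl_unet.load_state_dict(torch.load("dapi_prediction.pt"))  # hypothetical checkpoint path
isl_model = nn.Sequential(isl_unet, ShiftedSigmoid())
```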
2.3 Cell Segmentation

We next turned to the application of our pretraining scheme to cell segmentation, a more difficult multiple instance segmentation scenario.

CellMask™ Prediction as Pretraining Task. In our pretraining strategy, the first step of cell segmentation is the prediction of CellMask™ images (Fig. 3b) from DIC microscopy inputs (Fig. 3a).

We used a data set of 100 images of dimension (1024, 1024), divided into 90 images for training and 10 images for testing. 5 images of dimension (512, 512) were randomly cropped from each initial image.

For comparison, we again used the U-net 'on steroids'. We did not use any data augmentation. All training details are reported in Supplementary Table 2.

Fig. 3: Images from the same field-of-view, for a given focal plane. (a) DIC image. (b) Fluorescent CellMask™ image.

Transfer Learning for Cell Segmentation. Segmentation of cells is usually more difficult than nuclear segmentation, because cells tend to touch each other, and the precise detection of the contact line can be challenging. Indeed, we need to turn to multiple instance segmentation, where object properties are predicted together with pixel labels.

Again, we used Cellpose [2] with manual correction to generate the instance segmentation ground truth images from the associated CellMask™ images (Fig. 4a, Fig. 4b).

As for nuclear segmentation, we used training sets of different sizes N ∈ {1, 10, 50, 80} of dimension (1024, 1024) and evaluated the accuracy for each of them. Testing is always performed on the same 17 images. 5 images of dimension (512, 512) were randomly cropped from each initial image.

To tackle the issue of instance segmentation, we implemented a model predicting both a cell semantic segmentation image (Fig. 4c) and a distance map, i.e. an image where pixel values get higher as they get closer to the center of the cell, the background remaining black (Fig. 4d), as proposed in [19, 20].

Fig. 4: (a) Fluorescent CellMask™ image. (b) Corresponding cell instance segmentation image generated by Cellpose. (c) Cell semantic segmentation image generated from Cellpose output. (d) Distance map generated from Cellpose output.

Like in the previous section, we compare two models to investigate whether transfer learning from an ISL model can significantly improve the accuracy of our segmentation. The first model is the U-net 'on steroids', outputting 2 channels (Fig. 5a). The second model has the same U-net architecture but is pretrained on CellMask™ images, thus outputting only 1 channel; hence we add two Conv2d layers at the end to upscale to 2 channels (Fig. 5b).

We did not use any data augmentation. All training details are reported in Supplementary Table 2. Both models use CombinedLoss_α, presented in equation (2), as loss function. MSELoss stands for the usual Mean Square Error, while BCEWithLogitsLoss combines a sigmoid layer with the Binary Cross Entropy loss. y represents the output of our model, with the two channels y_d and y_s standing for the distance map and the semantic segmentation image, respectively. The factor α is used to balance the weights of the two losses during training. It has been set to α = 2000, 2000 being the initial ratio between MSELoss and BCEWithLogitsLoss. This has been inspired by the loss function used in Cellpose [2], which is also computed as the sum of two loss functions, one for each output channel.

CombinedLoss_α(y) = CombinedLoss_α((y_d, y_s)) = MSELoss(y_d) + α · BCEWithLogitsLoss(y_s)    (2)

Fig. 5: Models compared to predict cell instance segmentation: (a) U-net 'on steroids' which has not been trained on CellMask™ images. (b) U-net model pretrained on CellMask™ images.

Finally, we apply a post-processing step to get the final results. For this, we apply the h-maxima transformation to the predicted distance map, with h = 10. The h-maxima transformation is defined as the reconstruction by dilation of f - h under f, HMAX_h(f) = R_f^δ(f - h), and removes insignificant local maxima. f stands for the initial image, which is in our case the reconstructed distance map displayed in Fig. 6. h stands for the minimum local contrast for a local maximum to be kept; otherwise it is removed. Each local maximum represents a cell. The local maxima of HMAX then serve as seeds for the watershed algorithm, which splits the semantic segmentation result into individual regions, one for each maximum of HMAX. This leads to an instance segmentation image, such as the one presented in Fig. 6.

Fig. 6: Pipeline to get an instance segmentation image from both the distance map and the semantic segmentation image. The h-maxima transform followed by the watershed algorithm enables instance-wise segmentation of cells.

3 Results

3.1 Evaluation metrics

The metric used to evaluate DAPI and CellMask™ prediction performance is the Pearson Correlation Coefficient (PCC, equation (3)). PCC is defined as the covariance of two variables divided by the product of their standard deviations. In the equation, x and y are the two images to compare, and x̄ and ȳ are the averages of x and y.

PCC(x, y) = Σ_i (x_i - x̄)(y_i - ȳ) / ( sqrt(Σ_i (x_i - x̄)²) · sqrt(Σ_i (y_i - ȳ)²) )    (3)

To evaluate nucleus semantic segmentation, we use the Jaccard index (equation (4)). The Jaccard index, or Intersection Over Union (IoU), is a very popular metric in segmentation, as it equally penalizes both False Positive and False Negative pixels. A perfect segmentation leads to an IoU of 1, while an IoU of 0 corresponds to an entirely missed object (no intersection).

IoU(x, y) = 1 if x ∪ y = ∅, and |x ∩ y| / |x ∪ y| otherwise    (4)

While the IoU is perfectly suitable to make pixel-wise comparisons for semantic segmentation, the performance of instance segmentation needs to incorporate an object-wise comparison that does not only penalize wrong pixel decisions, but also fused or split objects. For this, we choose to use the Mean Average Precision (mAP), a popular metric for instance segmentation evaluation. A connected component from the ground truth is matched with a connected component from the segmentation result if the IoU of the two components is above a given threshold; in this case, the object is considered a TP. Unmatched connected components from the ground truth and the segmentation result are considered FN and FP, respectively. Thus, given an IoU threshold, one can compute the precision as defined in equation (5).

Precision = TP / (TP + FP + FN)    (5)

Precision is computed for all 10 IoU thresholds in {0.5 + i × 0.05, i ∈ [[0, 9]]}. The final result is the mean of these 10 values, hence called mean AP, or mAP.
3.2 Nucleus Segmentation

DAPI prediction yields very good results, with a PCC of 0.95±0.08.

Using the Jaccard index (or IoU) as metric, the U-net 'on steroids' gives 0.64±0.2 after training on 1 single image. In comparison, the model pretrained on DAPI reaches 0.84±0.1, improving the previous score by 31.3% (Fig. 7a). This improvement decreases as the size of the training set increases, being 4.8% (respectively 1.1%, 0.0%, 0.0%, -1.1%) after training on 10 (respectively 50, 100, 200, 500) images (Fig. 7b).

Results from both models trained on 1 single image are displayed in Fig. 8.

Fig. 7: Nucleus segmentation results. (a) Intersection Over Union (IoU) score for non ISL-pretrained and ISL-pretrained models, after training on 1 image. (b) Evolution of the average IoU score for both models for different training set sizes.

Fig. 8: Input bright-field images, DAPI images, DAPI predictions generated by the U-net 'on steroids', ground truth instance segmentation generated by Cellpose, non ISL-pretrained U-net 'on steroids' segmentation prediction, ISL-pretrained U-net 'on steroids' segmentation prediction. Segmentation is performed after training on 1 image for both models.

3.3 Cell Segmentation

CellMask™ prediction also yields very good results, with a PCC of 0.97±0.02.

Using mAP as metric, the U-net 'on steroids' gives 0.17±0.1 after training on 1 single image. In comparison, the model pretrained on CellMask™ reaches 0.33±0.09, improving the previous score by 94.1% (Fig. 9a). As in the previous section, this improvement decreases as the size of the training set increases, being 18.5% (respectively -3.0%, -2.9%) after training on 10 (respectively 50, 80) images (Fig. 9b).

Results from both models trained on 1 single image are displayed in Fig. 10.

Fig. 9: Cell segmentation results. (a) mAP score for non ISL-pretrained and ISL-pretrained models, after training on 1 image. (b) Evolution of the average mAP score for both models for different training set sizes.

Fig. 10: Input DIC images, CellMask™ images, CellMask™ predictions generated by the U-net 'on steroids', ground truth instance segmentation generated by Cellpose, non ISL-pretrained U-net 'on steroids' segmentation prediction, ISL-pretrained U-net 'on steroids' segmentation prediction. Segmentation is performed after training on 1 image for both models.

4 Discussion

The results presented in the previous sections show that pretraining with In Silico Labeling as a pretext task significantly improves the performance of a segmentation model trained on a very small data set. Indeed, the accuracy rises by 31.3% and 94.1% for nucleus semantic segmentation and cell instance segmentation, respectively, after training on 1 single image, using a model pretrained in an ISL setting.

The fact that pretraining on DAPI images helps to generate a nucleus semantic segmentation was expected, since the two outputs (DAPI and binary segmentation maps) are very close to each other. On the other hand, cell instance segmentation is a much more complex problem, and our results clearly indicate that also in this situation, pretraining with fluorescent marker prediction as a pretext task significantly improves segmentation accuracy for small datasets. We also observe that transfer learning is useful if we work on a very small data set (1 to 10 images), but that for both nucleus and cytoplasmic segmentation, the accuracy difference disappears if the models are trained on more than 10 images. This being said, if one has access to fluorescent images, it makes sense to use our proposed method to pretrain the network.

From a practical point of view, this idea provides an interesting alternative to manual annotation, in particular in the context of High Content Screening, where it is fairly easy to generate large amounts of data that contain both label-free and fluorescently labeled microscopy images. In this case, we can train efficient models for fluorescence prediction, and use these models in a pretraining scheme to reduce the manual annotation burden. Finally, we showed here that this pretraining scheme is effective for segmentation of nuclei and cells, but we also believe that it could be effective for any other type of cell structure, as soon as the associated fluorescent images are available. Furthermore, it will be interesting to investigate to which extent the pretraining scheme provides good starting points for generalist networks, applicable to a wide variety of modalities.

5 Conclusion

In this paper, we demonstrated that pretraining on the prediction of relevant fluorescent markers can be very useful to segment nuclei or cells. We showed that a model trained to predict fluorescent structures from label-free microscopy can learn to segment these structures from a very small data set, down to 1 single image. We believe that this can be of great help for applications where fluorescent data are easily available, if one wants to avoid tedious manual annotation to build large ground truth datasets for the training of neural networks. With only a few images, it is possible to fine-tune a pretrained model achieving performances matching those obtained by ImageNet-pretrained state-of-the-art networks fine-tuned on a much larger set of images. Our pretraining scheme can thus help biologists to save time and money without sacrificing any accuracy.

Code availability. Code (pre-processing, training and testing, and post-processing pipelines) is available at https://github.com/15bonte/isl_segmentation.
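For completeness, a sketch of the cell-segmentation variant (ISL-pretrained backbone, two added Conv2d layers, and the combined loss of Eq. (2)) is given below. It is an illustration under stated assumptions rather than the released code: the checkpoint path, the intermediate channel width of the added layers, the learning rate and the loop length are illustrative, while the two-channel output, α = 2000 and the ADAM optimizer come from the text and the supplementary tables.

```python
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp

# U-net pretrained on CellMask prediction (1 channel), followed by two Conv2d layers
# that upscale to the 2 output channels (distance map and semantic segmentation).
backbone = smp.Unet("densenet121", encoder_weights=None, in_channels=3, classes=1)
# backbone.load_state_dict(torch.load("cellmask_prediction.pt"))  # hypothetical checkpoint path
model = nn.Sequential(backbone, nn.Conv2d(1, 8, 3, padding=1), nn.Conv2d(8, 2, 3, padding=1))

mse, bce = nn.MSELoss(), nn.BCEWithLogitsLoss()
alpha = 2000.0  # weighting factor of Eq. (2)

def combined_loss(pred, dist_gt, sem_gt):
    # Channel 0: distance map (MSE); channel 1: semantic segmentation (BCE with logits).
    return mse(pred[:, 0], dist_gt) + alpha * bce(pred[:, 1], sem_gt)

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Random tensors stand in for the five (512, 512) crops of a single annotated image.
x = torch.rand(5, 3, 512, 512)
dist_gt = torch.rand(5, 512, 512)
sem_gt = (torch.rand(5, 512, 512) > 0.5).float()

for step in range(10):  # truncated loop; the paper trains on far more samples
    optimizer.zero_grad()
    loss = combined_loss(model(x), dist_gt, sem_gt)
    loss.backward()
    optimizer.step()
```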
Acknowledgments This work has been supported by the French government under management of Agence Nationale de la Recherche (ANR) as part of the "Investissements d'avenir" program, reference ANR-19-P3IA-0001 (PRAIRIE 3IA Institute), the Q-Life funded project CYTODEEP (ANR-17-CONV-0005) and the ANR project TRANSFACT (ANR-19-CE12-0007). Furthermore, we also acknowledge support by France-BioImaging (ANR-10-INBS-04).

References

[1] Reka Hollandi et al. "nucleAIzer: A Parameter-free Deep Learning Framework for Nucleus Segmentation Using Image Style Transfer". In: Cell Systems 10.5 (May 2020), 453–458.e6. issn: 2405-4712. doi: 10.1016/j.cels.2020.04.003.
[2] Carsen Stringer et al. "Cellpose: a generalist algorithm for cellular segmentation". In: Nature Methods 18.1 (Jan. 2021), pp. 100–106. issn: 1548-7105. doi: 10.1038/s41592-020-01018-x. url: https://doi.org/10.1038/s41592-020-01018-x.
[3] Uwe Schmidt et al. "Cell Detection with Star-Convex Polygons". In: Medical Image Computing and Computer Assisted Intervention - MICCAI 2018 - 21st International Conference, Granada, Spain, September 16-20, 2018, Proceedings, Part II. 2018, pp. 265–273. doi: 10.1007/978-3-030-00934-2_30.
[4] Eric M. Christiansen et al. "In Silico Labeling: Predicting Fluorescent Labels in Unlabeled Images". In: Cell 173.3 (2018), 792–803.e19. issn: 0092-8674. doi: 10.1016/j.cell.2018.03.040. url: https://www.sciencedirect.com/science/article/pii/S0092867418303647.
[5] Chawin Ounkomol et al. "Label-free prediction of three-dimensional fluorescence images from transmitted-light microscopy". In: Nature Methods 15.11 (Nov. 2018), pp. 917–920. issn: 1548-7105. doi: 10.1038/s41592-018-0111-2. url: https://doi.org/10.1038/s41592-018-0111-2.
[6] Yair Rivenson et al. "Virtual histological staining of unlabelled tissue-autofluorescence images via deep learning". In: Nature Biomedical Engineering 3.6 (June 2019), pp. 466–477. issn: 2157-846X. doi: 10.1038/s41551-019-0362-y. url: https://doi.org/10.1038/s41551-019-0362-y.
[7] Yair Rivenson et al. "PhaseStain: the digital staining of label-free quantitative phase microscopy images using deep learning". In: Light: Science & Applications 8.1 (Feb. 2019), p. 23. issn: 2047-7538. doi: 10.1038/s41377-019-0129-y. url: https://doi.org/10.1038/s41377-019-0129-y.
[8] Dan Li et al. "Deep Learning for Virtual Histological Staining of Bright-Field Microscopic Images of Unlabeled Carotid Artery Tissue". In: Molecular Imaging and Biology 22.5 (Oct. 2020), pp. 1301–1309. issn: 1860-2002. doi: 10.1007/s11307-020-01508-6. url: https://doi.org/10.1007/s11307-020-01508-6.
[9] Yi Liu et al. "Global Pixel Transformers for Virtual Staining of Microscopy Images". In: IEEE Transactions on Medical Imaging PP (Jan. 2020), pp. 1–1. doi: 10.1109/TMI.2020.2968504.
[10] Yoav N. Nygate et al. "Holographic virtual staining of individual biological cells". In: Proceedings of the National Academy of Sciences 117.17 (2020), pp. 9223–9231. doi: 10.1073/pnas.1919569117. url: https://www.pnas.org/doi/abs/10.1073/pnas.1919569117.
[11] Joseph Boyd et al. "Experimentally-Generated Ground Truth for Detecting Cell Types in an Image-Based Immunotherapy Screen". In: 2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI). 2020, pp. 886–890. doi: 10.1109/ISBI45749.2020.9098696.
[12] Chenfei Hu et al. "Live-dead assay on unlabeled cells using phase imaging with computational specificity". In: Nature Communications 13.1 (Feb. 2022), p. 713. issn: 2041-1723. doi: 10.1038/s41467-022-28214-x. url: https://doi.org/10.1038/s41467-022-28214-x.
[13] Jingfang K. Zhang et al. "Automatic Colorectal Cancer Screening Using Deep Learning in Spatial Light Interference Microscopy Data". In: Cells 11.4 (Feb. 2022).
[14] Cheng Shen et al. "Stain-free detection of embryo polarization using deep learning". In: Scientific Reports 12.1 (Feb. 2022), p. 2404. issn: 2045-2322. doi: 10.1038/s41598-022-05990-6. url: https://doi.org/10.1038/s41598-022-05990-6.
[15] Yuchen R. He et al. "Cell Cycle Stage Classification Using Phase Imaging with Computational Specificity". In: ACS Photonics 9.4 (2022), pp. 1264–1273. doi: 10.1021/acsphotonics.1c01779. url: https://doi.org/10.1021/acsphotonics.1c01779.
[16] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. "U-Net: Convolutional Networks for Biomedical Image Segmentation". 2015. doi: 10.48550/ARXIV.1505.04597. url: https://arxiv.org/abs/1505.04597.
[17] Pavel Yakubovskiy. Segmentation Models Pytorch. https://github.com/qubvel/segmentation_models.pytorch. 2020.
[18] Jia Deng et al. "ImageNet: A large-scale hierarchical image database". In: 2009 IEEE Conference on Computer Vision and Pattern Recognition. IEEE, 2009, pp. 248–255.
[19] Peter Naylor et al. "Nuclei Segmentation in Histopathology Images Using Deep Neural Networks". In: 2017 IEEE 14th International Symposium on Biomedical Imaging (ISBI 2017). 2017. issn: 1945-8452. doi: 10.1109/ISBI.2017.7950669.
[20] Peter Naylor et al. "Segmentation of Nuclei in Histopathology Images by Deep Regression of the Distance Map". In: IEEE Transactions on Medical Imaging 38.2 (2019), pp. 448–459. doi: 10.1109/TMI.2018.2865709.

Supplementary material

| Input   | Output | Pretrain   | Images | Crops | Training samples | Epochs | Learning rate | Loss    | Training time | Evaluation |
|---------|--------|------------|--------|-------|------------------|--------|---------------|---------|---------------|------------|
| BF + PC | DAPI   | ImageNet   | 384    | 1920  | 192 000          | 1000   | 0.1           | L1      | ∼2d           | PCC        |
| BF + PC | Nuclei | ImageNet   | 1      | 5     | 5 000            | 5 000  | 0.01          | Jaccard | ∼2h           | IoU        |
| BF + PC | Nuclei | ImageNet   | 2      | 10    | 5 000            | 5 000  | 0.01          | Jaccard | ∼2h           | IoU        |
| BF + PC | Nuclei | ImageNet   | 10     | 50    | 5 000            | 1 000  | 0.01          | Jaccard | ∼2h           | IoU        |
| BF + PC | Nuclei | ImageNet   | 50     | 250   | 10 000           | 400    | 0.01          | Jaccard | ∼2h           | IoU        |
| BF + PC | Nuclei | ImageNet   | 100    | 500   | 10 000           | 200    | 0.01          | Jaccard | ∼2h           | IoU        |
| BF + PC | Nuclei | ISL (DAPI) | 1      | 5     | 20 000           | 20 000 | 0.01          | Jaccard | ∼3h           | IoU        |
| BF + PC | Nuclei | ISL (DAPI) | 2      | 10    | 20 000           | 20 000 | 0.01          | Jaccard | ∼3h           | IoU        |
| BF + PC | Nuclei | ISL (DAPI) | 10     | 50    | 20 000           | 4 000  | 0.01          | Jaccard | ∼3h           | IoU        |
| BF + PC | Nuclei | ISL (DAPI) | 50     | 250   | 20 000           | 800    | 0.01          | Jaccard | ∼3h           | IoU        |
| BF + PC | Nuclei | ISL (DAPI) | 100    | 500   | 20 000           | 400    | 0.01          | Jaccard | ∼3h           | IoU        |

Table 1: Training details for DAPI and nucleus segmentation models. Segmentation models were meant to be trained on 20 000 training samples, but for non ISL-pretrained models training was stopped earlier once the model stopped improving. All models have the same U-net densenet121 architecture and were trained with an ADAM optimizer on GPU hardware. BF stands for bright-field and PC for phase contrast.
| Input | Output          | Pretrain        | Images | Crops | Training samples | Epochs | Learning rate | Loss          | Training time | Evaluation |
|-------|-----------------|-----------------|--------|-------|------------------|--------|---------------|---------------|---------------|------------|
| DIC   | CellMask™       | ImageNet        | 65     | 325   | 132 000          | 4000   | 0.1           | L1            | ∼1d           | PCC        |
| DIC   | Distance + Mask | ImageNet        | 1      | 5     | 10 000           | 10 000 | 0.01          | CombinedLossα | ∼1h30         | mAP        |
| DIC   | Distance + Mask | ImageNet        | 2      | 10    | 10 000           | 10 000 | 0.01          | CombinedLossα | ∼1h30         | mAP        |
| DIC   | Distance + Mask | ImageNet        | 10     | 50    | 10 000           | 2 000  | 0.01          | CombinedLossα | ∼1h30         | mAP        |
| DIC   | Distance + Mask | ImageNet        | 50     | 250   | 10 000           | 400    | 0.01          | CombinedLossα | ∼1h30         | mAP        |
| DIC   | Distance + Mask | ImageNet        | 80     | 400   | 10 000           | 250    | 0.01          | CombinedLossα | ∼1h30         | mAP        |
| DIC   | Distance + Mask | ISL (CellMask™) | 1      | 5     | 10 000           | 10 000 | 0.01          | CombinedLossα | ∼2h           | mAP        |
| DIC   | Distance + Mask | ISL (CellMask™) | 2      | 10    | 10 000           | 10 000 | 0.01          | CombinedLossα | ∼2h           | mAP        |
| DIC   | Distance + Mask | ISL (CellMask™) | 10     | 50    | 10 000           | 2 000  | 0.01          | CombinedLossα | ∼2h           | mAP        |
| DIC   | Distance + Mask | ISL (CellMask™) | 50     | 250   | 10 000           | 400    | 0.01          | CombinedLossα | ∼2h           | mAP        |
| DIC   | Distance + Mask | ISL (CellMask™) | 80     | 400   | 10 000           | 250    | 0.01          | CombinedLossα | ∼2h           | mAP        |

Table 2: Training details for CellMask™ and cell segmentation models. All models have the same U-net densenet121 architecture and were trained with an ADAM optimizer on GPU hardware.
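The CombinedLossα column in Table 2 refers to the weighted sum of a mean squared error on the distance-map channel and a binary cross-entropy on the semantic channel, with α = 2000 as in the paper. Below is a minimal, hypothetical PyTorch rendering of such a loss; the channel ordering and tensor shapes are illustrative assumptions.

```python
import torch
import torch.nn as nn

class CombinedLoss(nn.Module):
    """MSE on the distance-map channel + alpha * BCEWithLogits on the semantic channel."""
    def __init__(self, alpha: float = 2000.0):
        super().__init__()
        self.alpha = alpha
        self.mse = nn.MSELoss()
        self.bce = nn.BCEWithLogitsLoss()

    def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # output/target: (B, 2, H, W); channel 0 = distance map, channel 1 = cell mask
        dist_pred, sem_pred = output[:, 0], output[:, 1]
        dist_true, sem_true = target[:, 0], target[:, 1]
        return self.mse(dist_pred, dist_true) + self.alpha * self.bce(sem_pred, sem_true)

# Example usage with random tensors:
loss_fn = CombinedLoss(alpha=2000.0)
pred = torch.randn(4, 2, 64, 64)
true = torch.cat([torch.rand(4, 1, 64, 64) * 255,                      # distance map
                  (torch.rand(4, 1, 64, 64) > 0.5).float()], dim=1)    # binary mask
print(loss_fn(pred, true))
```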
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' Bonte et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' segmentation is a much more complex problem, and our results clearly indicate that also in this situation, pretraining with fluorescent marker prediction as a pretext task significantly improves segmentation accuracy for small datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' We also observe that transfer learning is useful if we work on a very small data set (1 to 10 images), but that for both nucleus and cytoplasmic segmentation, the accuracy difference disappears if the models are trained on more than 10 images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' This being said, if one has access to fluorescent images, it makes sense to use our proposed method to pretrain the network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' From a practical point of view, this idea provides an interesting alternative to manual annotation, in particular in the context of High Content Screening, where it is fairly easy to generate large amounts of data that contain both label-free and fluorescently labeled microscopy images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' In this case, we can train efficient models for fluorescence prediction, and use these models in a pre-training scheme to reduce the manual annotation burden.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' Finally, we showed here that this pre- training scheme is effective for segmentation of nuclei and cells, but we also believe that this could be effective for any other type of cell structures as soon as you can get the associated fluorescent images available.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' Furthermore, it will be interesting to investigate to which extent the pre-training scheme provides good starting points for generalist networks, applicable to a wide variety of modalities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' 5 Conclusion In this paper, we demonstrated that pretraining on the prediction of relevant fluorescent markers can be very useful to segment nuclei or cells.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' We showed that a model trained to predict some fluorescent structures from label-free microscopy can learn to segment these structures from a very small data set, down to 1 single image.' 
+We believe that this can be of great help for applications where fluorescent data are easily available, if one wants to avoid tedious manual annotation to build large ground truth datasets for the training of neural networks. With only a few images, it is possible to fine-tune a pretrained model, achieving performances matching those obtained by ImageNet-pretrained state-of-the-art networks fine-tuned on a much larger set of images. Our pre-training scheme can thus help biologists to save time and money without sacrificing any accuracy.
+Code availability
+Code (pre-processing, training and testing, and post-processing pipelines) is available at https://github.com/15bonte/isl segmentation.
+Acknowledgments
+This work has been supported by the French government under management of Agence Nationale de la Recherche (ANR) as part of the "Investissements d'avenir" program, reference ANR-19-P3IA-0001 (PRAIRIE 3IA Institute), the Q-Life funded project CYTODEEP (ANR-17-CONV-0005) and the ANR project TRANSFACT (ANR-19-CE12-0007). Furthermore, we also acknowledge support by France-BioImaging (ANR-10-INBS-04).
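To make the pre-training scheme described above concrete, here is a minimal, illustrative PyTorch-style sketch. It is not the authors' released code (see the repository above for the actual pre-processing, training and post-processing pipelines); the use of segmentation_models_pytorch with a densenet121 U-Net encoder, the L1 pretext loss, the Jaccard segmentation loss and the learning rates follow the training details reported in the supplementary tables below, while the data loaders, input channel count and tensor shapes are hypothetical placeholders.

```python
import torch
import segmentation_models_pytorch as smp

# Stage 1 -- pretext task (In Silico Labeling): regress the fluorescent
# channel (e.g. DAPI) from label-free bright-field / phase-contrast input.
model = smp.Unet(encoder_name="densenet121", encoder_weights="imagenet",
                 in_channels=2, classes=1)
l1_loss = torch.nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
for label_free, fluorescent in pretext_loader:      # hypothetical DataLoader
    optimizer.zero_grad()
    l1_loss(model(label_free), fluorescent).backward()
    optimizer.step()

# Stage 2 -- downstream segmentation: keep the ISL-pretrained weights and
# fine-tune the same network on a handful of annotated images (down to one),
# now with a segmentation loss instead of the regression loss.
seg_loss = smp.losses.JaccardLoss(mode="binary")
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for label_free, mask in tiny_annotated_loader:      # hypothetical DataLoader
    optimizer.zero_grad()
    seg_loss(model(label_free), mask).backward()
    optimizer.step()
```

The point of this design is that stage 1 needs no manual annotation at all (the fluorescent channel itself serves as the label), so the expensive human effort is confined to the tiny data set used in stage 2.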
+Supplementary material
+Input | Output | Pretrain | Images | Crops | Training samples | Epochs | Learning rate | Loss | Training time | Evaluation
+BF + PC | DAPI | ImageNet | 384 | 1920 | 192 000 | 1000 | 0.1 | L1 | ∼2d | PCC
+BF + PC | Nuclei | ImageNet | 1 | 5 | 5 000 | 5 000 | 0.01 | Jaccard | ∼2h | IoU
+BF + PC | Nuclei | ImageNet | 2 | 10 | 5 000 | 5 000 | 0.01 | Jaccard | ∼2h | IoU
+BF + PC | Nuclei | ImageNet | 10 | 50 | 5 000 | 1 000 | 0.01 | Jaccard | ∼2h | IoU
+BF + PC | Nuclei | ImageNet | 50 | 250 | 10 000 | 400 | 0.01 | Jaccard | ∼2h | IoU
+BF + PC | Nuclei | ImageNet | 100 | 500 | 10 000 | 200 | 0.01 | Jaccard | ∼2h | IoU
+BF + PC | Nuclei | ISL (DAPI) | 1 | 5 | 20 000 | 20 000 | 0.01 | Jaccard | ∼3h | IoU
+BF + PC | Nuclei | ISL (DAPI) | 2 | 10 | 20 000 | 20 000 | 0.01 | Jaccard | ∼3h | IoU
+BF + PC | Nuclei | ISL (DAPI) | 10 | 50 | 20 000 | 4 000 | 0.01 | Jaccard | ∼3h | IoU
+BF + PC | Nuclei | ISL (DAPI) | 50 | 250 | 20 000 | 800 | 0.01 | Jaccard | ∼3h | IoU
+BF + PC | Nuclei | ISL (DAPI) | 100 | 500 | 20 000 | 400 | 0.01 | Jaccard | ∼3h | IoU
+Table 1: Training details for DAPI and nucleus segmentation models. Segmentation models were supposed to be trained on 20 000 training samples, yet for non ISL-pretrained models the training was stopped earlier as the model was not learning anything after some point. All models have the same U-net densenet121 architecture. They were trained with an ADAM optimizer on GPU hardware. BF stands for bright-field and PC for phase contrast.
+Input | Output | Pretrain | Images | Crops | Training samples | Epochs | Learning rate | Loss | Training time | Evaluation
+DIC | CellMask™ | ImageNet | 65 | 325 | 132 000 | 4000 | 0.1 | L1 | ∼1d | PCC
+DIC | Distance + Mask | ImageNet | 1 | 5 | 10 000 | 10 000 | 0.01 | CombinedLossα | ∼1h30 | mAP
+DIC | Distance + Mask | ImageNet | 2 | 10 | 10 000 | 10 000 | 0.01 | CombinedLossα | ∼1h30 | mAP
+DIC | Distance + Mask | ImageNet | 10 | 50 | 10 000 | 2 000 | 0.01 | CombinedLossα | ∼1h30 | mAP
+DIC | Distance + Mask | ImageNet | 50 | 250 | 10 000 | 400 | 0.01 | CombinedLossα | ∼1h30 | mAP
+DIC | Distance + Mask | ImageNet | 80 | 400 | 10 000 | 250 | 0.01 | CombinedLossα | ∼1h30 | mAP
+DIC Distance + Mask ISL (CellMask™) 1 5 10 000 10 000 0.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content='01 CombinedLossα ∼2h mAP DIC Distance + Mask ISL (CellMask™) 2 10 10 000 10 000 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content='01 CombinedLossα ∼2h mAP DIC Distance + Mask ISL (CellMask™) 10 50 10 000 2 000 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content='01 CombinedLossα ∼2h mAP DIC Distance + Mask ISL (CellMask™) 50 250 10 000 400 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content='01 CombinedLossα ∼2h mAP DIC Distance + Mask ISL (CellMask™) 80 400 10 000 250 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content='01 CombinedLossα ∼2h mAP Table 2: Training details for CellMask™ and cell segmentation models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' All models have the same U-net densenet121 architecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} +page_content=' They were trained with an ADAM optimizer on GPU hardware.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/mNE2T4oBgHgl3EQfeQdZ/content/2301.03914v1.pdf'} diff --git a/mNE4T4oBgHgl3EQftw2O/vector_store/index.pkl b/mNE4T4oBgHgl3EQftw2O/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7b86236f8cbf469767c6b353ebcdc09a7aa3e016 --- /dev/null +++ b/mNE4T4oBgHgl3EQftw2O/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02e713ac741c977276871d52c1b6fd665ad7f93236abbfa34413b00ec850771e +size 464116 diff --git a/mNE_T4oBgHgl3EQf6xyS/vector_store/index.faiss b/mNE_T4oBgHgl3EQf6xyS/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..a6d01bc4a477790364b4d6b44cd2ab3e7c869404 --- /dev/null +++ b/mNE_T4oBgHgl3EQf6xyS/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3899d274ca7d731c7d28ac836ac2ad568360ae1d5109a18a3bbfe90bd0f8beaf +size 5308461 diff --git a/mtFPT4oBgHgl3EQf4zVh/content/2301.13194v1.pdf b/mtFPT4oBgHgl3EQf4zVh/content/2301.13194v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1295de4697cf5a49c96d3410bdc6bce33cf0ee21 --- /dev/null +++ b/mtFPT4oBgHgl3EQf4zVh/content/2301.13194v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee9af72878f6fb55e3b65083b881bd8c029d2036ac537dc8d10608d272666b78 +size 1344698 diff --git a/mtFPT4oBgHgl3EQf4zVh/vector_store/index.pkl b/mtFPT4oBgHgl3EQf4zVh/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6b98047f46c0e8b917c64c6c544f02a0856a0edb --- /dev/null +++ b/mtFPT4oBgHgl3EQf4zVh/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8d3e4f2962b7b6d87444fad8ccab3ed7f1e700ef7575d773209554ce8301e92 +size 174992 diff --git a/n9FKT4oBgHgl3EQfFy0J/content/tmp_files/2301.11721v1.pdf.txt b/n9FKT4oBgHgl3EQfFy0J/content/tmp_files/2301.11721v1.pdf.txt new file mode 100644 index 
0000000000000000000000000000000000000000..622a94a9b5a7af017170f6d761023284b8a86208 --- /dev/null +++ b/n9FKT4oBgHgl3EQfFy0J/content/tmp_files/2301.11721v1.pdf.txt @@ -0,0 +1,2129 @@
+Single-Trajectory Distributionally Robust Reinforcement Learning
+Zhipeng Liang*1, Xiaoteng Ma*2, Jose Blanchet3, Jiheng Zhang1, Zhengyuan Zhou4
+*Equal contribution. 1Hong Kong University of Science and Technology, 2Tsinghua University, 3Stanford University, 4New York University. Correspondence to: Zhipeng Liang, Xiaoteng Ma.
+Abstract
+As a framework for sequential decision-making, Reinforcement Learning (RL) has been regarded as an essential component leading to Artificial General Intelligence (AGI). However, RL is often criticized for assuming that the training environment is identical to the test environment, which also hinders its application in the real world. To mitigate this problem, Distributionally Robust RL (DRRL) is proposed to improve the worst-case performance over a set of environments that may contain the unknown test environment. Due to the nonlinearity of the robustness goal, most previous work resorts to the model-based approach, learning with either an empirical distribution estimated from the data or a simulator that can be sampled infinitely, which limits their application to environments with simple dynamics. In contrast, we attempt to design a DRRL algorithm that can be trained along a single trajectory, i.e., with no repeated sampling from a state. Building on standard Q-learning, we propose distributionally robust Q-learning with single trajectory (DRQ), and its average-reward variant named differential DRQ. We provide asymptotic convergence guarantees and experiments for both settings, demonstrating their superiority over non-robust counterparts in perturbed environments.
+1. Introduction
+Reinforcement Learning (RL) is a machine learning paradigm for sequential decision problems. In particular, it aims to learn an optimal policy that maximizes the cumulative return from interacting with the environment. While remarkable progress has been made recently (Silver et al., 2016; Mnih et al., 2015; Vinyals et al., 2019), a key assumption always lies beneath, namely that the test environment in which the learned policy is deployed is the same as the training environment. The discrepancy between them is prevalent, as test environments are often too complicated to be perfectly captured by the training one. Moreover, even when the true environment can be adequately modeled and used as the training environment, environment shifts may still be prevalent in some application scenarios, e.g., financial markets and robotic control. Overlooking this mismatch can cause RL algorithms to fail in real-world applications, as the optimal policy of a Markov Decision Process (MDP) is well known to be sensitive to the model (Mannor et al., 2004; Iyengar, 2005).
+As a result, the environment shift should be accounted for in the training process to learn a practical RL policy. Distributionally Robust MDPs (DRMDPs) (Satia & Lave Jr, 1973; Nilim & El Ghaoui, 2005; Iyengar, 2005; Wiesemann et al., 2013; Lim et al., 2013; Ho et al., 2021; Goyal & Grand-Clement, 2022) adopt this idea and formulate the problem by assuming that the test environment, even if different from the training one, still stays nearby it.
The goal is to +design algorithms to optimize the worst-case expected return +over an ambiguity set that contains all the possible testing +distributions. Solving the robust MDPs with a general am- +biguity set could be NP-hard (Wiesemann et al., 2013) and +thus, to ensure computational feasibility, it is common to +assume s/(s, a) rectangular assumption, which allows the +ambiguity set to be constructed for each s/(s, a) pair. Built +upon them, (Iyengar, 2005; Nilim & El Ghaoui, 2005) in- +stantiate the probability distance measure in constructing +ambiguity set with specific cases of φ-divergence, such as +χ2 and KL divergences. Moreover, while searching over +the ambiguity set is an infinite dimensional optimization +problem, Shapiro (2017) proves that it can be transformed +into a two-dimensional optimization problem. +In reality, we can mainly access the samples from the train- +ing environment instead of the transition model. In this case, +optimizing a policy w.r.t. the performance in a known test +environment with only access to the samples from the train- +ing environment is even more challenging, which is being +solved by the distributionally robust RL (DRRL) literature. +Theoretical understanding, including the sample efficiency, +has been actively progressed (Zhou et al.; Yang et al.; Shi +arXiv:2301.11721v1 [stat.ML] 27 Jan 2023 + +Single-Trajectory Distributionally Robust Reinforcement Learning +& Chi; Panaganti et al.; Ma et al., 2022). Besides, a line of +recent attention focuses on designing practical algorithms +and shows promising empirical evidence in robustness gain- +ing (Liu et al., 2022; Neufeld & Sester, 2022; Abdullah +et al., 2019). +A natural choice to solve the DDRL problem is the model- +based framework (Yang, 2018; Abdullah et al., 2019; +Neufeld & Sester, 2022), which first estimates the tran- +sition model from data, constructs the ambiguity set around +the empirical distribution, and solves the robust problem +approximately. Although model-based methods are often +more sample-efficient and easier to analyze, they are also +well-known for their high computational cost, great demand +for memory to store the whole MDP model, and possible +failure to generalize to non-tabular RL settings. Moreover, +real environments in some scenarios, such as image-based +tasks, are too complicated to be adequately modeled. Thus, +the policy learned from the mismatch model may signifi- +cantly deviate from the true one. +In contrast, model-free algorithms learn to select actions +directly, bypassing the above challenges from the model- +based methods. In particular, Q-learning (Watkins & Dayan, +1992), a paradigm of the model-free algorithm, enjoys both +theoretical understanding and wide deployment in practical +applications (Mnih et al., 2015; Lillicrap et al.). There are +indeed some attempts to solve DRRL without models, e.g., +(Liu et al., 2022). Instead, the key assumption beneath them +is the weak simulator assumption, where the algorithm can +access an arbitrary number of samples from any state-action +pair. Such an assumption is too restricted when the simu- +lator is inaccessible and fails to achieve a pure model-free +training manner. As sequential decision algorithms, RL +agents should mostly be trained along a single trajectory, +where the observed next state is the only realized sample +following the transition model from the current state. Most +model-free non-robust RL algorithms support training in +this setting, which flourishes their wide applications. 
In- +stead, as far as we know, no existing work can solve the +DRRL problem in this manner due to the nontrivial diffi- +culty arising from robustness gain requirements. Thus a +natural question arises, +Can we design a DRRL algorithm for learning along a +single trajectory? +1.1. Our Contributions +We design a distributionally robust Q-learning algorithm +with features beyond the previous DRRL algorithms. The +first feature is leveraging the strong duality of φ-divergence +distributionally robust problem to transform the infinite- +dimensional optimization problem into a finite-dimensional +one and derive the DR Bellman operator for various φ- +divergence, including χ2 and KL divergences. The second +one is that we develop a three timescales stochastic approx- +imation framework to solve the DR Bellman equation by +carefully exploiting the structure of the corresponding Bell- +man operator. The first two loops are designed to estimate +the non-linear Bellman operator of various φ-divergence, +and the final loop is to update the Q function. The dis- +crepancy in the speed between estimating the DR Bellman +operator and updating the Q function can alleviate the bias +from the nonlinear Bellman operator. Third is that we in- +stantiate our framework into the DR-variant of Q-learning +algorithms, distributionally robust Q-learning with single +trajectory (DRQ), to solve the discount MDPs using various +φ-divergence with single-trajectory. We prove that our pro- +posed algorithm asymptotically converges to optimal DR +policy. Fourth is that we extend our framework to solve the +DRRL problem with average reward and develop the differ- +ential DRQ algorithm. As far as we know, this is the first +DRRL algorithm in the average-reward RL setting. Finally, +we provide empirical evidence on navigation and queueing +control tasks to demonstrate the robustness and acceptable +sample efficiency of the policy learned by the proposed DR +Q learning algorithm. +1.2. Related Work +Distributional robustness has been actively studied in su- +pervised learning. While a stream of the literature focus +on the setting where the testing distribution is the same as +the training one, and the learner merely gauges the distri- +butional ambiguity level to balance statistical utility and +robustness carefully (Bertsimas & Sim, 2004; Delage & +Ye, 2010; Hu & Hong, 2013; Duchi & Namkoong, 2021), +another stream of work consider the setting where learning +predictive rules in testing distributions are different from +the training distributions and perturb the training sample +with some synthesized noise before solving the empirical +risk minimization problem, which has been shown to work +(Sinha et al., 2018; Goodfellow et al., 2014; Ganin et al., +2016; Zhang et al., 2019; Tram`er et al., 2017). Beyond +supervised learning, DR MDPs also have fruitful results +(Iyengar, 2005; El Ghaoui & Nilim, 2005; Xu & Mannor, +2010; Wiesemann et al., 2013). By assuming the known +MDP environment, DRMDPs mainly focus on the struc- +ture of the optimal policy and the computational cost for +planning. In contrast, recent emerging attention focuses +on learning the optimal DR policy only with access to the +samples collected from the environment. +2. Preliminary +2.1. Standard MDPs +Consider an infinite-horizon MDP (S, A, γ, µ, P, r) where +S and A are finite state and action spaces with cardinality +S and A. 
P : S × A → ∆S is state transition probability + +Single-Trajectory Distributionally Robust Reinforcement Learning +measure and r is the reward function. Without loss of gener- +ality, we assume that r : S ×A → [0, 1] is deterministic and +bounded in [0, 1]. A stationary policy π : S → ∆A maps, +for each state s to a probability distribution over the action +set A and induce a random trajectory s1, a1, r1, s2, · · · , with +s1 ∼ µ, an = π(sn) and sn+1 ∼ P(·|sn, an) := Psn,an +for n ∈ N+. +To derive the policy corresponding to the value function, we +define the action-value function Q : S × A → R as, +Q⋆(s, a) := sup +π∈Π +Eπ,P +� ∞ +� +n=1 +γn−1r(sn, an)|s1 = s, a1 = a +� +, +which provides the expected cumulative discounted rewards +under the optimal policy. The optimal Q function is the fixed +point of the following the Bellman optimality equation, +Q⋆(s, a) = r(s, a) + γEP +� +max +a∈A Q⋆(s′, a) +� +. +(1) +2.2. Q-learning +Our model-free algorithmic design relies on a Q-learning +template, originally designed to solve the non-robust Bell- +man optimality equation (Equation 1). +Q-learning is a +model-free reinforcement learning algorithm to use sam- +ple trajectories to update the estimation for the Q func- +tion incrementally. Suppose at time n, we draw a sample +(sn, an, rn, s′ +n), from the environment using the policy π. +Then the algorithm updates the estimated Q-function fol- +lowing +Qn+1(sn, an) = (1 − αn(sn, an))Qn(sn, an) ++ αn(sn, an)(rn + γ max +a′∈A Qn(s′ +n, a′)). +In other word, the algorithm updates the Q function by +incrementally updating the unbiased estimator for the true +Q value, i.e., rn + γ maxa′∈A Qn(s′ +n, a′). +2.3. Distributionally Robust MDPs +DRRL learns the optimal policy that is robust to unknown +environmental changes. In particular, the transition model +P and the reward function r can potentially be different +in the test environment. To better present our key contri- +butions, we restrict our attention to the perturbation of the +transition model and assume the reward function to be the +same. The notion of distributional robustness implies that +the true transition P, although not the same as the training +environment and cannot be known in advance, lies within a +so-called ambiguity set P. The ambiguity set P contains all +the transition models that are close to the training environ- +ment under some probability distance measure. +To ensure the computational feasibility, we adopt the (s, a)- +rectangular manner in constructing the ambiguity set (Iyen- +gar, 2005; Wiesemann et al., 2013): for each (s, a) ∈ S ×A, +we define ambiguity set containing all probability measure +with some distance D no more than ρ from Ps,a, +Ps,a := {P ′ +s,a|D(P ′ +s,a∥Ps,a) ≤ ρ}. +(2) +Then we build the ambiguity set for the whole transition +model as the Cartesian product for every (s, a)-ambiguity +set, i.e., P = � +(s,a)∈S×A Ps,a. This construction ensures +that P is a compact set. Given the ambiguity set P, we +define the optimal DR action-value function as, +Qrob,⋆(s, a) := sup +π∈Π +inf +P ∈P Eπ,P [ +∞ +� +n=1 +γn−1r(sn, an)|s1 = s, a1 = a]. +Under the (s, a)-rectangular assumption, the Bellman opti- +mality equation has been established by (Iyengar, 2005; Xu +& Mannor, 2010) +Qrob,⋆(s, a) = r(s, a) + γ inf +P ∈P Es′∼P +� +max +a∈A Qrob,⋆(s′, a) +� +. +(3) +For notation simplicity, we would ignore the superscript +rob. +3. Distributonally Robust Q-learning with +Single Trajectory +In this section, we aim to solve DRRL with single-trajectory +data. 
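As a point of reference for what follows, the sketch below (a minimal Python illustration with names of our own choosing, not code from the paper) spells out the non-robust tabular Q-learning update recalled in Section 2.2, which the robust algorithms in this section extend.

import numpy as np

def q_learning_step(Q, s, a, r, s_next, alpha, gamma):
    # Incremental move toward the one-sample target r + gamma * max_a' Q(s', a').
    target = r + gamma * Q[s_next].max()
    Q[s, a] = (1.0 - alpha) * Q[s, a] + alpha * target
    return Q

# Toy usage on a two-state, two-action table.
Q = np.zeros((2, 2))
Q = q_learning_step(Q, s=0, a=1, r=1.0, s_next=1, alpha=0.1, gamma=0.9)

The single observed next state s' is the only sample available from the transition kernel at each step, which is exactly the single-trajectory regime considered below.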
We first show the nontrivial difficulty aroused from +approximating the DR Bellman operator. Then we present +our three-timescale framework and instantiate it into the two +most common divergences, χ2, and KL divergences. +3.1. Bias in Plug-in Estimator +In general, directly using the DR Bellman optimal equation +(Equation 3) involve computing an expectation w.r.t. each +model P ∈ P. Searching the worst-case over the ambiguity +set P is an infinite-dimensional optimization problem that +can be computationally infeasible. Given that the samples +from the single trajectory are generated according to the +nominal model P, estimating the expectation under different +model P ′ ∈ P is even more challenging. Even if it can be +achieved by, e.g., importance sampling-based techniques, +the cost of high variance is still not desirable. +However, when restricting the probability measure to χ2- +divergence, i.e., Dχ2(Q, P) = +� +( dQ +dP − 1)2dP, the infinite- +dimensional optimization problem can be reduced to be +univariate, which significantly improve the computational +feasibility. This transformation is by exploiting the dual +form of the primal problem, which is formalized as follows. +Lemma 3.1 (Duchi & Namkoong (2021)). For any X ∼ P and +any ρ ∈ [1, ∞), the DRO problem with χ2 divergence can be + +Single-Trajectory Distributionally Robust Reinforcement Learning +transformed via, +inf +Dχ2 (P ′∥P )≤ρ EP ′[X] = sup +η∈R +σχ2(X, η) +:= sup +η∈R +� +η − +� +1 + ρ +� +EP [(η − X)2 ++] +� +. +(4) +Lemma 3.1 implies that the DRO with χ2 divergence can +be solved using the original probability measure P, which +sheds light on the single-trajectory solutions for the DRRL +problem. Applying Equation 4 to Equation 3 we know +the optimal action-value function is the fixed point of the +equation Q = T χ2(Q) with +T χ2(Q)(s, a) = r(s, a)+γ sup +η∈R +σχ2(max +a∈A Q(·, a), η). (5) +Similar to χ2 divergence, the DRO with KL divergence, +where DKL(Q, P) = +� +log( dQ +dP )dP for Q ≪ P, also en- +joys a univariate dual form as follow. +Lemma 3.2 ((Hu & Hong, 2013)). Suppose X ∼ P has a +finite moment generating function in the neighborhood of +zero. Then, +inf +P ′:DKL(P ′∥P )≤ρ EP ′[X] = sup +β≥0 +σKL(X, β) +:= sup +β≥0 +� +−β log +� +EP +� +e−X/β�� +− β · ρ +� +. +(6) +Applying Equations 6 to Equation 3 we have the optimal +action-value function under KL divergence is the fixed point +solution of Q = T KL(Q) with +T KL(Q)(s, a) = r(s, a) + γ sup +β≥0 +σKL(max +a∈A Q(·, a), β). +(7) +Sample average approximation (SAA), i.e., replacing the +expectation with the sample average, is a standard method +for constructing estimators. Following it, we can construct +a plug-in estimator by adopting the SAA in Equation 5 +and 7. However, sufficient samples may not be feasible for +constructing an accurate enough estimator for every (s, a)- +pair with single trajectory samples. +More challenging, this plug-in estimator is, in fact, biased +because of the nonlinearity of the Bellman operator. To +provide a concrete taste of the bias, consider the case when +only one sample is obtained, then the χ2 Bellman operator +in Equation 5 becomes, +r(s, a) + γ sup +η∈R +σχ2(max +a∈A Q(·, a), η) +=r(s, a) + γ sup +η∈R +{η − +� +1 + ρ(η − max +a +Q(s′, a))+} +=r(s, a) + γ max +a +Q(s′, a), +which is reduced to the non-robust Bellman operator and +obviously not an unbiased estimator for T χ2(Q). A similar +issue appears in KL divergence. +To solve this bias in the SAA method, Liu et al. 
(2022) +introduces the multilevel Monte-Carlo method (Blanchet +& Glynn, 2015), which requires a large batch of samples +for the same (s, a) pair before the next update, and it is not +flexible to be adopted in the online decision nature and is +prohibitive in our single trajectory setting. +Note that the non-robust Q-learning is solving the Bellman +operator’s fixed point in a stochastic approximation manner. +A salient feature in the DR Bellman operator is the bi-level +optimization nature, i.e., jointly solving the dual parameter +and the fixed point of the Bellman optimality equation. We +revisit the stochastic approximation view of the Q-learning +and develop a three-timescale framework by a faster running +estimate of the optimal dual parameter and the slower update +of the Q table. +In this paper, we mainly discuss the most common cases in +φ-divergence, i.e., when divergence is chosen as χ2 and KL +divergence. +3.2. χ2 Divergence +To incrementally update η, we aim to develop an SGD-type +algorithm. We present the gradient of σχ2(X, η) w.r.t. η. +Lemma 3.3 (Gradient of the χ2 dual function). +σ′ +χ2(X, η) = 1 − +� +ρ + 1 · Z1 +√Z2 +, +(8) +where +Z1 = EX∼P [(η − X)+] , +(9) +Z2 = EX∼P +� +(η − X)2 ++ +� +. +(10) +As the update of η and Q rely on each other, we keep the +learning speed of η and Q different to stabilize the training +process. Due to the (s, a)-rectangular assumption, η is +independent across different (s, a)-pairs while the Q table +depends on each other. The independent structure for η +allows it to be estimated more easily; thus, we approximate +it in a faster loop, while for Q we update it in a slower loop. +However, updating η is inherently challenging as the plug- +in gradient estimator, i.e., Equation 8, is also biased. Note +that for N i.i.d. samples X1, · · · , XN ∼ P, E[f(X)] ̸= +f(E[X]) for general f, which holds for f(x) = 1/√x as +here. Thus we introduce another even slower timescale to + +Single-Trajectory Distributionally Robust Reinforcement Learning +Algorithm 1 Distributionally Robust Q-learning with χ2 divergence +1: Input: Exploration rate ϵ, Learning rates {ζi(n)}i∈[3] +2: Init: Q(s, a) = 0, ∀(s, a) ∈ S × A +3: for n = 1, 2, · · · do +4: +Observe the state sn, execute the action an = arg maxa∈A Qn(sn, a) using ϵ greedy policy +5: +Observe the reward rn and next state s′ +n +6: +Update Zn,1, Zn,2 follow +Zn+1,1(sn, an) = (1 − ζ1(n))Zn,1(sn, an) + ζ1(n)(ηn,1(sn, an) − max +a +Qn(s′ +n, a))+, +Zn+1,2(sn, an) = (1 − ζ1(n))Zn,2(sn, an) + ζ1(n)(ηn,1(sn, an) − max +a +Qn(s′ +n, a))2 ++. +7: +Update ηn via ηn+1(sn, an) = (1 − ζ2(n))ηn(sn, an) + ζ2(n)(1 − √ρ + 1 Zn,1 +√ +Zn,2 ). +8: +Update Qn via Qn+1(sn, an) = (1 − ζ3(n))Qn(sn, an) + ζ3(n)(rn + γ(ηn(sn, an) − √1 + ρ +� +Zn,2(sn, an))). +9: end for +estimate Z1 and Z2. +Zn+1,1(sn, an) = (1 − ζ1(n))Zn,1(sn, an)+ +(11) +ζ1(n)(ηn(sn, an) − max +a +Qn(s′ +n, a))+, +Zn+1,2(sn, an) = (1 − ζ1(n))Zn,2(sn, an)+ +(12) +ζ1(n)(ηn(sn, an) − max +a +Qn(s′ +n, a))2 ++. +Then in the medium timescale we approximate the +η⋆(s, a) := arg maxη∈R σ2 +χ(Q, η) . +ηn+1(sn, an) = (1 − ζ2(n))ηn(sn, an)+ +ζ2(n)(1 − +� +ρ + 1 Zn,1 +� +Zn,2 +). +(13) +Finally, we update the DR Q function in the slowest time +scale via +Qn+1(sn, an) = (1 − ζ3(n))Qn(sn, an)+ +ζ3(n)T χ2 +n (Q)(sn, an), +(14) +where T χ2 +n (Q)(s, a) is the empirical version of Equation 5, +defined as +T χ2 +n (Q)(s, a) = r(s, a) + γ(ηn(s, a) − +� +1 + ρ · +� +Zn,2(s, a)). 
+Here ζ1(n), ζ2(n) and ζ3(n) are learning rates for three +timescales at time n, which will be specified later. +We prove the a.s. convergence of Algorithm 1 as Theo- +rem 3.4 and the proof is deferred in Appendix D.1. +Theorem 3.4. (Zn,1, Zn,2, ηn, Qn) in Algorithm 1 con- +verges to (Z⋆ +1, Z⋆ +2, η⋆, Q⋆) a.s. as n → ∞ where η⋆ and +Q⋆ are the solution of Equation 5 while Z⋆ +1 and Z⋆ +2 are the +corresponding quantity under η⋆ and Q⋆. +3.3. KL Convergence +Next we present the gradient of σKL(X, β) w.r.t. β. +Lemma 3.5 (Gradient of the KL dual function). +σ′ +KL(X, β) = −ρ − log Z1 − β−1 Z2 +Z1 +, +where +Z1 = EX∼P +� +e−X/β� +, +(15) +Z2 = EX∼P +� +Xe−X/β� +. +(16) +Again the plug-in estimator is biase and we use Zn,1 and +Zn,2 to approximate them separately via the following up- +date rules, +Zn+1,1(sn, an) = (1 − ζ1(n))Zn,1(sn, an) ++ ζ1(n)e−yn/βn(sn,an) +(17) +Zn+1,2(sn, an) = (1 − ζ1(n))Zn,2(sn, an) ++ ζ1(n)yne−yn/βn(sn,an), +(18) +where yn = maxa′∈A Qn(s′ +n, a′). For the second timescale, +we denote Dn+1(sn, an) as +Dn+1(sn, an) = ρ + log (Zn,1(sn, an)) ++ β−1 +n (sn, an)Zn,2(sn, an)/Zn,1(sn, an), +(19) +and then update βn+1(sn, an) via +βn+1(sn, an) = (βn(sn, an) − ζ2(n)Dn+1(sn, an))+, +(20) +where (x)+ = max{x, 0}. +For the third timescale, we solve the DR operator via +Qn+1(sn, an) = (1 − ζ3(n))Qn(sn, an) ++ ζ3(n)T KL +n +(Qn)(sn, an), +(21) + +Single-Trajectory Distributionally Robust Reinforcement Learning +where +T KL +n +(Q)(s, a) = rn(s, a)−γ(βn(s, a) log Zn,1(s, a)+βn(s, a)ρ). +We prove the a.s. convergence of Algorithm 2 as Theo- +rem 3.6 and the proof is deferred in Appendix D.2. +Theorem 3.6. (Zn,1, Zn,2, βn, Qn) in Algorithm 1 con- +verges to (Z⋆ +1, Z⋆ +2, β⋆, Q⋆) a.s. as n → ∞, where β⋆ and +Q⋆ are the solution of Equation 7 while Z⋆ +1 and Z⋆ +2 are the +corresponding quantity under η⋆ and Q⋆. +We summarize the update procedure as Algorithm 2. +3.4. Three Timescales Convergence Analysis +In this subsection, we provide the roadmap for establishing +the a.s. convergence of Algorithm 1 and 2 to the globally +optimal action-value function. Our argument is by gener- +alizing the classic machinery of two-timescale stochastic +approximation (Borkar, 2009) to a three-timescale frame- +work and validating our proposed algorithms to satisfy the +conditions needed. Our goal is to rewrite the Algorithm 1 +as (Algorithm 2 is similar) +Zn+1 = Zn,1 + ζ1(n)[f(Zn, ηn, Qn) + Mn+1], +ηn+1 = ηn + ζ2(n)g(Zn, ηn, Qn), +Qn+1 = Qn + ζ3(n)h(Zn, ηn, Qn), +where Zn = (Zn,1, Zn,2) and Mn is an appropriate martin- +gale difference sequences conditioned on some filtration Fn. +f, g, and h are appropriate Lipschitz functions that satisfy +the conditions needed for our ordinary differential equations +(ODEs) analysis. To ensure a.s. convergence, the stepsizes +for different loops need to be elegantly designed as follows +Assumption 3.7. The stepsizes ζi(n), i = 1, 2, 3 satisfy +� +n +ζi(n) = ∞, +� +n +ζ2 +i (n) < ∞, +ζ1(n) = o(ζ2(n)), ζ2(n) = o(ζ3(n)). +These stepsize schedules satisfy the standard conditions for +stochastic approximation algorithms and ensure that 1. the +key quantities in gradient estimator Z update on the fastest +timescale, 2. the dual variable for the DR problem β/η +update on the intermediate timescale, and 3. the Q table +updates on the slowest timescale. Examples of such stepsize +are ζ1(n) = +1 +1+n0.6 , ζ2(n) = +1 +1+n0.8 and ζ3(n) = +1 +1+n. +Under Assumption 3.7, when analyzing the behavior of the +Zn, the ηn and the Qn can be viewed as quasi-static. 
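As a concrete reference for the analysis that follows, the sketch below implements one single-trajectory update of Algorithm 1 (the χ2 case, Equations 11 to 14) in Python. The function name, the array layout, and the small constant eps guarding the division by the square root of Z2 are our own choices and are not part of the paper.

import numpy as np

def drq_chi2_step(Q, eta, Z1, Z2, s, a, r, s_next,
                  rho, gamma, zeta1, zeta2, zeta3, eps=1e-8):
    # One update of Algorithm 1 for the observed transition (s, a, r, s');
    # Q, eta, Z1, Z2 are arrays of shape (|S|, |A|).
    y = Q[s_next].max()
    # Fastest loop: running estimates of Z1 = E[(eta - y)_+] and Z2 = E[(eta - y)_+^2].
    Z1[s, a] = (1 - zeta1) * Z1[s, a] + zeta1 * max(eta[s, a] - y, 0.0)
    Z2[s, a] = (1 - zeta1) * Z2[s, a] + zeta1 * max(eta[s, a] - y, 0.0) ** 2
    # Intermediate loop: dual variable eta (eps is only a numerical guard).
    eta[s, a] = (1 - zeta2) * eta[s, a] + zeta2 * (
        1.0 - np.sqrt(rho + 1.0) * Z1[s, a] / (np.sqrt(Z2[s, a]) + eps))
    # Slowest loop: distributionally robust Q update.
    target = r + gamma * (eta[s, a] - np.sqrt(1.0 + rho) * np.sqrt(Z2[s, a]))
    Q[s, a] = (1 - zeta3) * Q[s, a] + zeta3 * target
    return Q, eta, Z1, Z2

Because the Z estimates use the largest stepsize and the Q table the smallest (as in the example schedule above), each faster variable effectively settles before the slower ones move appreciably, which is the quasi-static picture used in the analysis.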
Thus +to study the behavior of the fastest loop, we analyze the +following ODEs, +˙Z(t) = f(Z(t), β(t), Q(t)), +˙η(t) = 0, ˙Q(t) = 0, +(22) +and can prove that ODEs (22) a.s. converge to the set +λ′ +1(η, Q) for proper η and Q and some mapping λ +′′ +1. Sim- +ilarly, Qn can be viewed as fixed when analyzing the be- +havior of ηn, and the corresponding ODEs to understand its +behavior is +˙η(t) = g(λ′′ +1(η(t), Q(t)), η(t), Q(t)), +˙Q(t) = 0. +(23) +Again, by exploiting the dual form of the DRO prob- +lem, we can prove these ODEs converge to the set +{λ′ +1(Q), λ′ +2(Q), Q|Q ∈ V } for some mapping λ′ +1 and λ′ +2 +with V is the set containing all the mapping from S to R. +Finally, we study the slowest timescale ODE, +˙Q(t) = h(λ′ +1(Q(t)), λ′ +2(Q(t)), Q(t)), +(24) +and use our analysis to prove the a.s. convergence of our +proposed Algorithm 1 and 2 to the globally optimal pair +(Z⋆ +1, Z⋆ +2, η⋆, Q⋆) and (Z⋆ +1, Z⋆ +2, β⋆, Q⋆), correspondingly. +4. Extension to Average Reward Setting +Some practical scenarios, including queueing control, con- +cern more about the long-run average performance. Thus, +the average expected reward, instead of the cumulative dis- +counted reward, may be preferred as the evaluation criterion. +We extend our three timescales framework to solve this +setting. +4.1. Average-Reward MDPs +In this subsection, we formalize the average reward MDPs, +defined by the tuple (S, A, P, r, µ), where each element S, +A, P, r and µ are the same as in Section 2.1. The average +reward rate for a given policy π and any initial state s is +r(π, P) := lim +n→∞ +1 +n +n +� +t=1 +Eπ,P [r(st, at)|s1 = s] , +which does not depend on the initial state under the com- +municating assumption 1 (see, e.g.,Puterman (2014)). The +best reward rate is defined as r⋆ := supπ r(π, P). The aver- +age reward RL tries to achieve r⋆. We can also define the +optimal relative action-value function as +Q⋆(s, a) := sup +π∈Π +Eπ,P [ +∞ +� +n=1 +(rn(sn, an) − r⋆)|s1 = s, a1 = a], +which is the cumulative difference between the immediate +reward and the optimal reward rate with the first state as s +and action as a. The relationship between the optimal aver- +age reward and the optimal relative action-value function +1We say an MDP is communicating if there exists a policy that +can transition from one to any other state in a finite number of +steps with non-zero probability. + +Single-Trajectory Distributionally Robust Reinforcement Learning +Algorithm 2 Distributionally Robust Q-learning with KL divergence +1: Input: Exploration rate ϵ, Learning rates {ζi(n)}i∈[3] +2: Init: Q(s, a) = 0, ∀(s, a) ∈ S × A +3: for n = 1, 2, · · · do +4: +Observe the state sn, execute the action an = maxa∈A Q(sn, a) using ϵ greedy policy +5: +Observe the reward rn and next state s′ +n +6: +Update Zn,1, Zn,2 via +Zn+1,1(sn, an) = (1 − ζ1(n))Zn,1(sn, an) + ζ1(n)e−yn/βn(sn,an) +Zn+1,2(sn, an) = (1 − ζ1(n))Zn,2(sn, an) + ζ1(n)yne−yn/βn(sn,an), +where yn = maxa∈A Qn(s′ +n, a). +7: +Update βn via βn+1(s, a) = (βn(sn, an) − ζ2(n)Dn+1(sn, an))+, +where Dn+1(sn, an) = ρ + log (Zn,1(sn, an)) + β−1 +n (sn, an)Zn,2(sn, an)/Zn,1(sn, an). +8: +Update Qn via Qn+1(sn, an) = (1−ζ3(n))Qn(sn, an)+ζ3(n)(rn−γ(βn(sn, an) log Zn,1(sn, an)+βn(sn, an)ρ)). +9: end for +can be characterized by the Bellman optimality equation, +Q⋆(s, a) = r(s, a) − r⋆ + EP +� +max +a′∈A Q⋆(s′, a′) +� +. +4.2. Distributionally Robust average-reward MDPs +We are interested in the DR variant of the above setting. 
+To be specific, we define the DR average reward as the +worst-case average reward over an ambiguity set P, +rrob(π, P) = inf +P ∈P lim +n→∞ +1 +n +n +� +t=1 +Eπ,P [r(st, at)|s1 = s], +and rrob,⋆ = supπ∈Π rrob(π, P). We then only consider the +case where the MDPs induced by the policy π and any of the +transition model P in the ambiguity set P is communicating. +Moreover, we assume the ambiguity set P is constructed in a +(s, a)-rectangular manner following Section 2.3. Similarly, +we can define the DR relative action-value function as +Qrob,⋆(s, a) := sup +π∈Π +inf +P ∈P Eπ,P +� ∞ +� +t=1 +(rt(st, at) − r(π, P))|s, a +� +, +where conditional on the first state is a and the first action +is a. Wang et al. (2023) proves a DR Bellman optimality +equation to characterize the relationship between optimal +DR average reward and optimal DR action-value function, +Qrob,⋆ = r(s, a) − rrob,⋆ + inf +P ∈P EP +� +max +a∈A Qrob,⋆(s′, a) +� +. +(25) +We ignore the superscript rob for notation simplicity. +4.3. Review of Differential Q-Learning +For DR discounted MDPs, it has been shown that the value +function is the unique fixed-point of the DR discounted +Bellman operator (Iyengar, 2005). However, there is no +such guarantee in the DR average-reward case. Our goal is +to design an algorithm for learning the optimal policy under +this setting. In this subsection, we review an average-reward +RL algorithm, Differential Q-Learning (Wan et al., 2021), +as we will develop our DR variant of it by instantiating our +three timescales framework. We can also develop a DR +variant of other average-reward RL algorithms, including +RVI-Q learning (Abounadi et al., 2001). The differential Q- +learning shares the same spirit with the vanilla Q-learning +by incrementally updating the Q function from the temporal +difference. In particular, +Qn+1(sn, an) = Qn(sn, an) + αnδn +where αn is learning rate and δn is the temporal-difference +(TD), +δn = rt − rn + max +a∈A Qn(s′ +t, a) − Qn(sn, an). +The innovation of the differential Q-learning is mainly on +using the rn as the reference for the Q value, which is +updated by +rn+1 = rn + ναnδn, +for some positive constant ν > 0. Compared with the RVI- +Q learning, the differential Q-learning algorithm is much +easier to use and robust to the choice of hyperparameter. +4.4. Algorithmic Design +In this subsection, we mainly discuss the solution to χ2 +divergence-based ambiguity set and defer the KL case in +Appendix B. To develop a DR variant of differential Q- +learning, note that the DR temporal difference error implied +by Equation 25 is +δn = rn − ¯rn + inf +P ∈P EP [max +a∈A Qn(s′ +n, a)] − Qn(sn, an). + +Single-Trajectory Distributionally Robust Reinforcement Learning +(a) Environment. +(b) ρ = 0.1 +(c) ρ = 0.5 +(d) ρ = 1.0 +(e) ρ = 1.5 +Figure 1. The Windy Beach environment and the learned policies for different ρ’s. Notice that the policy with ρ = 0.1 is the same as the +risk-neutral optimal policy. +0 +1 +2 +3 +Million Steps +0.5 +0.0 +0.5 +1.0 +1.5 +Value += 0.1 +0 +1 +2 +3 +Million Steps +2 +1 +0 +1 += 0.5 +0 +1 +2 +3 +Million Steps +4 +3 +2 +1 +0 +1 += 1.0 +0 +1 +2 +3 +Million Steps +6 +4 +2 +0 +2 += 1.5 +Figure 2. The training curves in the Windy Bench environment. Each curve is averaged over 10 random seeds and shaded by their standard +deviations. The dashed line is the optimal robust value. +Our three timescale framework comes into the role of solv- +ing the infimum over the ambiguity set. 
To be specific, we +use the same inner two timescales as in Equation 11, 12, +and 13. Then we design the third loop using the template +provided by the differential Q-learning, i.e., we update ¯rn +to approximate r⋆, +¯rn+1(sn, an) = ¯rn(sn, an) + νζ3(n)¯δn, +(26) +where ν > 0 is some learning rate and the empirical TD +error as +¯δn = rn − ¯rn(s, a) − Qn(s, a) − ηn + +� +1 + ρ +� +Zn,2(sn, an). +We modify the update for the Q function as +Qn+1(sn, an) = Qn(sn, an) + ζ3(n)¯δn. +(27) +Using the argument established by our three timescales +stochastic approximation framework, we can prove the a.s. +convergence of our proposed algorithms, which are deferred +in Appendix B.1. We also defer the discussion about the +assumptions needed for the a.s. convergence as they are +standard and shared by RVI-Q learning and differential Q- +learning algorithms. +Theorem 4.1 (Informal). Under proper assumptions, +(Qn, ¯rn) in our Algorithms 3 and 4 converge to the (Q⋆, r⋆) +a.s.. +5. Experiments +We present the experimental results on the discounted re- +ward setting and refer to Appendix 5.2 for the results on the +average reward setting. +5.1. Discounted Reward +We use the Windy Beach task as the test-bed for our pro- +posed DRQ algorithm. Suppose a robot is on the beach. +It starts from the initial state of (2, 0) and navigates to the +goal state of (2, 3). For each step, the robot can take action +and move to an adjacent grid. However, the wind on the +beach is strong and will make the robot move in an arbi- +trary direction with a probability p. If it arrives at the goal +state, it will receive a reward of +5, and the task will end +if it is unfortunate to be hit by a wave (in the region of +{(3, j) | 0 ≤ j ≤ 3}), it will receive a penalty of −1. +We train the agent in a nominal environment with p = 0.5 +and evaluate its performance in the perturbed environments. +The uncertainty set radiuses are set to ρ = 0.1, 0.5, 1.0, 1.5 +to demonstrate different levels of robustness. For each run, +we train the DRQ algorithm for 3 million steps. For each +step, we explore the environment with a ϵ-greedy strategy, +where ϵ = 0.1 in our experiment. If the robot has reached +the goal state, the environment will be reset to the initial +state. Otherwise, the robot still explores until the episode +length is up to 20, in which case we will also reset the envi- + +Single-Trajectory Distributionally Robust Reinforcement Learning +0.4 +0.5 +0.6 +0.7 +0.8 +0.9 +p +1 +0 +1 +2 +3 +4 +5 +Return +0.1 +0.5 +1.0 +1.5 +(a) Return +0.4 +0.5 +0.6 +0.7 +0.8 +0.9 +p +0 +5 +10 +15 +20 +25 +30 +Steps +0.1 +0.5 +1.0 +1.5 +(b) Episode length +Figure 3. Averaged return and steps in the perturbed Windy Beach +environments. +ronment to avoid invalid wandering. Although the Robbins- +Monro step sizes (i.e., Assumption 3.7) are essential for +theoretical convergence, we empirically find that the con- +stant step sizes work well. Specially, we use ζ1(t) = 0.03, +ζ2(t) = 0.01 and ζ3(t) = 0.003 in our experiment to track +three different time scales. The discount factor γ = 0.9. +Each experiment is repeated with 10 random seeds, where +Q is initialized by randomly sampling from N(0, 1). The +training curves are shown in Figure 2, where we compare +the estimated value maxa Q(s0, a) (shaded by the standard +deviation) and the optimal robust value V ∗(s0) (the dashed +line) of the initial state. The results show estimated value +converges to the optimal value quickly, which is consistent +with our analysis. 
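For concreteness, the training protocol just described can be sketched as follows. This is a minimal Python illustration, not released code: env is a hypothetical stand-in for the Windy Beach simulator with a reset()/step() interface, drq_chi2_step refers to the single-transition update sketched after Algorithm 1 above, and the constant step sizes are the ones reported in the text, used empirically in place of the Robbins-Monro schedule as discussed above.

import numpy as np

def train_drq_chi2(env, n_states, n_actions, rho,
                   steps=3_000_000, gamma=0.9, epsilon=0.1,
                   zeta1=0.03, zeta2=0.01, zeta3=0.003,
                   max_ep_len=20, seed=0):
    # Single-trajectory training loop; env.reset() returns an integer state and
    # env.step(a) returns (next_state, reward, done). Both are assumed interfaces.
    rng = np.random.default_rng(seed)
    Q = rng.normal(size=(n_states, n_actions))      # Q initialized from N(0, 1)
    eta = np.zeros((n_states, n_actions))
    Z1 = np.zeros((n_states, n_actions))
    Z2 = np.zeros((n_states, n_actions))
    s, ep_len = env.reset(), 0
    for _ in range(steps):
        # epsilon-greedy exploration
        a = int(rng.integers(n_actions)) if rng.random() < epsilon else int(Q[s].argmax())
        s_next, r, done = env.step(a)
        Q, eta, Z1, Z2 = drq_chi2_step(Q, eta, Z1, Z2, s, a, r, s_next,
                                       rho, gamma, zeta1, zeta2, zeta3)
        ep_len += 1
        if done or ep_len >= max_ep_len:
            s, ep_len = env.reset(), 0   # reset at the goal, a wave hit, or the length cap
        else:
            s = s_next
    return Q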
+To evaluate the robustness of the learned policies, we +compare their returns in the perturbed environments with +p ∈ {0.4, 0.5, 0.6, 0.7, 0.8, 0.9}. By saying “return”, we +mean G = �T −1 +t=0 rt, where T is the first time step reach- +ing the goal state. We run the learned policies over 100 +episodes for each setting and compare the return distribu- +tion in Figure 3a. Interestingly, we find that the robust +policies outperform the risk-neutral policy (the same as the +policy with ρ = 0.1) even in the nominal environment. +This is because the returns do not take the discount γ into +consideration. To better understand this phenomenon, we +compare the episode lengths T in Figure 3b, showing that +T increases with ρ. Longer time to complete tasks conflicts +with discount criteria, but it is preferred in conservative sce- +narios. As for the different ρ’s, we find that ρ = 1.0 is the +best within a relatively wide range (p ∈ {0.6, 0.7, 0.8}). In +contrast, ρ = 1.5 is preferred in the environment of extreme +uncertainty with p = 0.9. It suggests that DRO provides a +trade-off for different preferences of robustness, overcoming +the over-conservatism of the other robustness notions. +5.2. Average Reward +In this part, we consider the Access-Control Queuing +task (Sutton & Barto) to evaluate the average-reward DRQ- +learning. A queue receives 4 different classes of consumers +with uniform probability. Depending on their priority, they +will pay different fees of 1, 2, 4, and 8. There are 10 servers +to serve the consumers. At each step, we can take action +to accept the arriving customer and send her to any free +server, in which case the fees will be paid according to her +priority. Otherwise, we can also reject her and earn nothing. +If a server is busy, it will have a probability of p being free +in the next time step. In the nominal environment, we set +p = 0.09. A policy needs to choose whether to accept or +reject depending on the number of current free servers and +the priority of the arriving customer, in order to maximize +the long-run average reward. +In this task, we use the same experimental setup as the +Windy Beach task for the constant step sizes, ϵ-greedy ex- +ploration, and Q initialization. Due to the large state space +and the lack of discount, we need more samples for the +Access-Control Queuing task. Hence we train the average- +reward DRQ-learning algorithm over 15 million. The con- +stant in the Differential Q-learning is set as η = 1, and we +do not find the performance is sensitive to it. We use the +differential value of two reference states to show the con- +vergence of the algorithm. Specifically, we choose the state +s0 that has no free server with an arriving customer whose +priority is 1, and the state s1 where all servers are free and +the arriving customer’s priority is 8. The differential value +of these states, i.e. ∆V = maxa Q(s1, a)−maxa Q(s0, a), +are expected to be the largest among all the state pairs. The +training curves shown in Figure 5 illustrate that our proposed +algorithm consistently converges to the optimal differential + +Single-Trajectory Distributionally Robust Reinforcement Learning +value. The learned polices are visualized in Figure 6. +Again, we perturb the with p ∈ {0.01, 0.03, 0.06} and eval- +uate the robustness of the learned polices. The smaller p +represents a strain on service resources. We run the algo- +rithm for 100 time steps, calculate the average reward, and +repeat the experiment for 100 times. 
The results are shown +in Figure 3. The distributionally robust policies perform +better in environments where p ≤ 0.06, demonstrated their +superiority in resource-constrained conditions. +0.01 +0.03 +0.06 +0.09 +p +0.0 +0.5 +1.0 +1.5 +2.0 +2.5 +3.0 +3.5 +4.0 +Average Reward +non-robust +0.1 +0.4 +0.8 +1.2 +Figure 4. Averaged return and steps in the perturbed environments. +References +Abdullah, M. A., Ren, H., Ammar, H. B., Milenkovic, V., +Luo, R., Zhang, M., and Wang, J. Wasserstein Robust +Reinforcement Learning, 2019. URL http://arxiv. +org/abs/1907.13196. +Abounadi, J., Bertsekas, D., and Borkar, V. S. Learning +algorithms for markov decision processes with average +cost. SIAM Journal on Control and Optimization, 40(3): +681–698, 2001. +Bertsimas, D. and Sim, M. The price of robustness. Opera- +tions research, 52(1):35–53, 2004. +Blanchet, J. H. and Glynn, P. W. Unbiased monte carlo for +optimization and functions of expectations via multi-level +randomization. In 2015 Winter Simulation Conference +(WSC), pp. 3656–3667. IEEE, 2015. +Borkar, V. S. Stochastic approximation: a dynamical sys- +tems viewpoint, volume 48. Springer, 2009. +Borkar, V. S. and Meyn, S. P. The ode method for con- +vergence of stochastic approximation and reinforcement +learning. SIAM Journal on Control and Optimization, 38 +(2):447–469, 2000. +Borkar, V. S. and Soumyanatha, K. An analog scheme for +fixed point computation. i. theory. IEEE Transactions on +Circuits and Systems I: Fundamental Theory and Appli- +cations, 44(4):351–355, 1997. +Delage, E. and Ye, Y. Distributionally robust optimization +under moment uncertainty with application to data-driven +problems. Operations research, 58(3):595–612, 2010. +Duchi, J. C. and Namkoong, H. Learning models with uni- +form performance via distributionally robust optimization. +The Annals of Statistics, 49(3):1378–1406, 2021. +El Ghaoui, L. and Nilim, A. Robust solutions to markov +decision problems with uncertain transition matrices. Op- +erations Research, 53(5):780–798, 2005. +Ganin, Y., Ustinova, E., Ajakan, H., Germain, P., Larochelle, +H., Laviolette, F., Marchand, M., and Lempitsky, V. +Domain-adversarial training of neural networks. The +journal of machine learning research, 17(1):2096–2030, +2016. +Goodfellow, I. J., Shlens, J., and Szegedy, C. Explain- +ing and harnessing adversarial examples. arXiv preprint +arXiv:1412.6572, 2014. +Goyal, V. and Grand-Clement, J. Robust markov decision +processes: Beyond rectangularity. Mathematics of Oper- +ations Research, 2022. +Ho, C. P., Petrik, M., and Wiesemann, W. Partial policy +iteration for l1-robust markov decision processes. J. Mach. +Learn. Res., 22:275–1, 2021. +Hu, Z. and Hong, L. J. Kullback-leibler divergence con- +strained distributionally robust optimization. Available at +Optimization Online, pp. 1695–1724, 2013. +Iyengar, G. N. Robust dynamic programming. Mathematics +of Operations Research, 30(2):257–280, 2005. +Lillicrap, T. P., Hunt, J. J., Pritzel, A., Heess, N., Erez, +T., Tassa, Y., Silver, D., and Wierstra, D. Continuous +control with deep reinforcement learning. URL http: +//arxiv.org/abs/1509.02971. +Lim, S. H., Xu, H., and Mannor, S. Reinforcement learning +in robust markov decision processes. Advances in Neural +Information Processing Systems, 26, 2013. +Liu, Z., Bai, Q., Blanchet, J., Dong, P., Xu, W., Zhou, Z., +and Zhou, Z. Distributionally robust q-learning. In In- +ternational Conference on Machine Learning, pp. 13623– +13643. PMLR, 2022. 
+Ma, X., Liang, Z., Xia, L., Zhang, J., Blanchet, J., Liu, M., +Zhao, Q., and Zhou, Z. Distributionally robust offline re- +inforcement learning with linear function approximation. +arXiv preprint arXiv:2209.06620, 2022. + +Single-Trajectory Distributionally Robust Reinforcement Learning +0 +5 +10 +15 +Million Steps +0 +5 +10 +15 +20 +Value += 0.1 +0 +5 +10 +15 +Million Steps +0 +5 +10 +15 +20 += 0.4 +0 +5 +10 +15 +Million Steps +0 +5 +10 +15 +20 += 0.8 +0 +5 +10 +15 +Million Steps +0 +5 +10 +15 +20 +25 +30 += 1.2 +Figure 5. Averaged return and steps in the perturbed environments. +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +4 +8 +Number of free servers +Priority +(a) Non-robust +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +4 +8 +Number of free servers +Priority +(b) ρ = 0.1 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +4 +8 +Number of free servers +Priority +(c) ρ = 0.4 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +4 +8 +Number of free servers +Priority +(d) ρ = 0.8 +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +1 +2 +4 +8 +Number of free servers +Priority +(e) ρ = 1.2 +Figure 6. The non-robust and the learned policies for different ρ’s in the Access-Control Queuing task. + +Single-Trajectory Distributionally Robust Reinforcement Learning +Mannor, S., Simester, D., Sun, P., and Tsitsiklis, J. N. Bias +and variance in value function estimation. In Proceedings +of the twenty-first international conference on Machine +learning, pp. 72, 2004. +Mnih, V., Kavukcuoglu, K., Silver, D., Rusu, A. A., Veness, +J., Bellemare, M. G., Graves, A., Riedmiller, M., Fidje- +land, A. K., Ostrovski, G., et al. Human-level control +through deep reinforcement learning. nature, 518(7540): +529–533, 2015. +Neufeld, A. and Sester, J. Robust q-learning algorithm for +markov decision processes under wasserstein uncertainty. +ArXiv, abs/2210.00898, 2022. +Nilim, A. and El Ghaoui, L. Robust control of markov +decision processes with uncertain transition matrices. Op- +erations Research, 53(5):780–798, 2005. +Panaganti, K., Xu, Z., Kalathil, D., and Ghavamzadeh, M. +Robust Reinforcement Learning using Offline Data. URL +http://arxiv.org/abs/2208.05129. +Puterman, M. L. +Markov decision processes: discrete +stochastic dynamic programming. John Wiley & Sons, +2014. +Satia, J. K. and Lave Jr, R. E. Markovian decision pro- +cesses with uncertain transition probabilities. Operations +Research, 21(3):728–740, 1973. +Shapiro, A. Distributionally robust stochastic programming. +SIAM Journal on Optimization, 27(4):2258–2275, 2017. +Shi, L. and Chi, Y. Distributionally Robust Model-Based Of- +fline Reinforcement Learning with Near-Optimal Sample +Complexity. URL http://arxiv.org/abs/2208. +05767. +Silver, D., Huang, A., Maddison, C. J., Guez, A., Sifre, L., +Van Den Driessche, G., Schrittwieser, J., Antonoglou, I., +Panneershelvam, V., Lanctot, M., et al. Mastering the +game of go with deep neural networks and tree search. +nature, 529(7587):484–489, 2016. +Sinha, A., Namkoong, H., and Duchi, J. Certifiable distribu- +tional robustness with principled adversarial training. In +International Conference on Learning Representations, +2018. URL https://openreview.net/forum? +id=Hk6kPgZA-. +Sutton, R. S. and Barto, A. G. Reinforcement Learning: +An Introduction. Adaptive Computation and Machine +Learning. MIT Press. ISBN 978-0-262-19398-6. +Tram`er, F., Kurakin, A., Papernot, N., Goodfellow, I., +Boneh, D., and McDaniel, P. +Ensemble adversar- +ial training: +Attacks and defenses. +arXiv preprint +arXiv:1705.07204, 2017. 
+Vinyals, O., Babuschkin, I., Czarnecki, W. M., Mathieu, M., +Dudzik, A., Chung, J., Choi, D. H., Powell, R., Ewalds, T., +Georgiev, P., Oh, J., Horgan, D., Kroiss, M., Danihelka, I., +Huang, A., Sifre, L., Cai, T., Agapiou, J. P., Jaderberg, M., +Vezhnevets, A. S., Leblond, R., Pohlen, T., Dalibard, V., +Budden, D., Sulsky, Y., Molloy, J., Paine, T. L., Gulcehre, +C., Wang, Z., Pfaff, T., Wu, Y., Ring, R., Yogatama, +D., W¨unsch, D., McKinney, K., Smith, O., Schaul, T., +Lillicrap, T., Kavukcuoglu, K., Hassabis, D., Apps, C., +and Silver, D. Grandmaster level in StarCraft II using +multi-agent reinforcement learning. 575(7782):350–354, +2019. doi: 10.1038/s41586-019-1724-z. +Wan, Y., Naik, A., and Sutton, R. S. Learning and planning +in average-reward markov decision processes. In Inter- +national Conference on Machine Learning, pp. 10653– +10662. PMLR, 2021. +Wang, Y., Velasquez, A., Atia, G., Prater-Bennette, A., +and Zou, S. Robust average-reward markov decision +processes, 2023. URL https://arxiv.org/abs/ +2301.00858. +Watkins, C. J. and Dayan, P. Q-learning. Machine learning, +8(3):279–292, 1992. +Wiesemann, W., Kuhn, D., and Rustem, B. Robust markov +decision processes. Mathematics of Operations Research, +38(1):153–183, 2013. +Xu, H. and Mannor, S. +Distributionally robust markov +decision processes. Advances in Neural Information Pro- +cessing Systems, 23, 2010. +Yang, I. Wasserstein distributionally robust stochastic con- +trol: A data-driven approach. +IEEE Transactions on +Automatic Control, 66:3863–3870, 2018. +Yang, W., Zhang, L., and Zhang, Z. Towards Theoretical +Understandings of Robust Markov Decision Processes: +Sample Complexity and Asymptotics. URL http:// +arxiv.org/abs/2105.03863. +Zhang, H., Yu, Y., Jiao, J., Xing, E., El Ghaoui, L., and +Jordan, M. Theoretically principled trade-off between +robustness and accuracy. In International conference on +machine learning, pp. 7472–7482. PMLR, 2019. +Zhou, Z., Zhou, Z., Bai, Q., Qiu, L., Blanchet, J., and +Glynn, P. +Finite-Sample Regret Bound for Distribu- +tionally Robust Offline Tabular Reinforcement Learn- +ing. In Proceedings of The 24th International Confer- +ence on Artificial Intelligence and Statistics, pp. 3331– +3339. PMLR. URL https://proceedings.mlr. +press/v130/zhou21d.html. + +Single-Trajectory Distributionally Robust Reinforcement Learning +A. Notations +We fix some notations that will be used in the appendix. For a positive integer n, [n] denotes the set {1, 2, · · · , n}. |A| +denotes the cardinality of the set A. We adopt the standard asymptotic notations: for two non-negative sequences an and bn, +an = O(bn) iff lim supn→∞ an/bn < ∞. ∆d is the simplex on a d dimensional space, i.e., ∆d = {x : �d +i=1 xi = 1, xi ≥ +0, ∀i ∈ [d]}. For any vector x ∈ Rd and any semi-positive matrix A ∈ Rd×d with A ⪰ 0, we denote ∥x∥A := +√ +x⊤Ax. ∥·∥ +is Euclidean norm. +B. Differential Distributionally Robust Q Learning with Single Trajactory +B.1. Algorithmic Design +Algorithm 3 Differential DR Q-Learning with χ2 divergence (DDRQ) +1: Input: Exploration rate ϵ, Learning rates {ζi(n)}i∈[3] and ν +2: Init: Q(s, a) = 0, +∀(s, a) ∈ S × A +3: for n = 1, 2, · · · do +4: +Observe the state sn, execute the action an = arg maxa∈A Qn(sn, a) using ϵ greedy policy +5: +Observe the reward rn and next state s′ +n +6: +Update Zn,1, Zn,2 follow +Zn+1,1(sn, an) = (1 − ζ1(n))Zn,1(sn, an) + ζ1(n)(ηn,1(sn, an) − max +a +Qn(s′ +n, a))+, +Zn+1,2(sn, an) = (1 − ζ1(n))Zn,2(sn, an) + ζ1(n)(ηn,1(sn, an) − max +a +Qn(s′ +n, a))2 ++. 
+7: +Update ηn via +ηn+1(sn, an) = (1 − ζ2(n))ηn(sn, an) + ζ2(n)(1 − +� +ρ + 1 Zn,1 +� +Zn,2 +). +8: +Update ¯rn and Qn by +¯rn+1(sn, an) = ¯rn(sn, an) + νζ3(n)¯δn, +and +Qn+1(sn, an) = Qn+1(sn, an) = Qn(sn, an) + ζ3(n)¯δn, +where +¯δn = rn − ¯rn(s, a) − Qn(sn, an) − ηn(sn, an) + +� +1 + ρ +� +Zn,2(s, a). +9: end for + +Single-Trajectory Distributionally Robust Reinforcement Learning +Algorithm 4 Differential DR Q-Learning with KL divergence +1: Input: Exploration rate ϵ, Learning rates {ζi(n)}i∈[3] and ν +2: Init: Q(s, a) = 0, +∀(s, a) ∈ S × A +3: for n = 1, 2, · · · do +4: +Observe the state sn, execute the action an = arg maxa∈A Qn(sn, a) using ϵ greedy policy +5: +Observe the reward rn and next state s′ +n +6: +Update Zn,1, Zn,2 via +Zn+1,1(sn, an) = (1 − ζ1(n))Zn,1(sn, an) + ζ1(n)e−yn/βn(sn,an) +Zn+1,2(sn, an) = (1 − ζ1(n))Zn,2(sn, an) + ζ1(n)yne−yn/βn(sn,an), +where yn = maxa∈A Qn(s′ +n, a). +7: +Update βn via +βn+1(sn, an) = (βn(sn, an) − ζ2(n)Dn+1(sn, an))+, +where +Dn+1(sn, an) = ρ + log (Zn,1(sn, an)) + β−1 +n (sn, an)Zn,2(sn, an)/Zn,1(sn, an). +8: +Update ¯rn and Qn by +¯rn+1(sn, an) = ¯rn(sn, an) + νζ3(n)¯δn, +and +Qn+1(sn, an) = Qn+1(sn, an) = Qn(sn, an) + ζ3(n)¯δn, +where +¯δn = rn − ¯rn(s, a) − Qn(sn, an) − βn(sn, an) log Zn,1(sn, an) − βn(sn, an)ρ. +9: end for +C. Multiple Timescale Convergence +Lemma C.1 (Discrete Gronwall inequality). Let {xn, n ≥ 0} (resp. {an, n ≥ 0} ) be nonnegative (resp. positive) sequences +and C, L ≥ 0 scalars such that for all n, +xn+1 ≤ C + L +� +n +� +m=0 +amxm +� +. +Then for Tn = �n +m=0 am, +xn+1 ≤ CeLTn. +Lemma C.2 (Gronwall inequality). For continuous u(·), v(·) ≥ 0 and scalars C, K, T ≥ 0 +u(t) ≤ C + K +� t +0 +u(s)v(s)ds, +∀t ∈ [0, T], +implies +u(t) ≤ CeK +� T +0 v(s)ds, +∀t ∈ [0, T]. +C.1. Stability Criterion +Consider the stochastic approximation scheme zn ∈ RN given by +zn+1 = zn + an [g (zn) + Mn+1] , + +Single-Trajectory Distributionally Robust Reinforcement Learning +with the following assumptions: +Assumption C.3. g : RN → RN is Lipschitz. +Assumption C.4. The sequence {an} ⊂ R satisfies � +n an = ∞, � +n a2 +n < ∞. +Assumption C.5. {Mn} is a martingale difference sequence with respect to the filtration Fn = σ (zm, Mm, m ≤ n), there +exists K > 0 such that E +� +∥Mn+1∥2 | Fn +� +≤ K(1 + ∥zn∥2) a.s.. +Assumption C.6. The functions gd(z) = g(dz)/d, d ≥ 1 satisfy gd(z) → g∞(z) as d → ∞ uniformly on compacts for +some continuous function g∞ : RN → RN. In addition, the ODE +˙z(t) = g∞(z(t)) +has the origin as its globally asymptotically stable equilibrium. +We then have +Lemma C.7. Under Assumptions C.3 to C.5, we have supn ∥zn∥ < ∞ a.s. +See Section 2.2 and 3.2 in Borkar (2009) for the proof. As the stability proofs in Section 3.2 of (Borkar, 2009) are path-wise, +we can apply this result to analyze multiple timescales dynamic. +C.2. Three Timescales Convergence Criterion +Consider the scheme +xn+1 = xn + an +� +f (xn, yn, zn) + M (1) +n+1 +� +(28) +yn+1 = yn + bn +� +g (xn, yn, zn) + M (2) +n+1 +� +(29) +zn+1 = zn + cc +� +h (xn, yn, zn) + M (3) +n+1 +� +(30) +where f : Rd+k+p → Rd, g : Rd+k+p → Rk, h : Rd+k+p → Rp, {M (i) +n }, i = 1, 2, 3 are martingale difference sequences +with respect to the σ-fields Fn = σ +� +xm, ym, M (1) +m , M (2) +m , M (3) +m ; m ≤ n +� +, and the an, bn, cn form decreasing stepsize +sequences. 
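Before the formal treatment, a toy numerical illustration of this coupled scheme may help. The drift functions f, g, h and the stepsize sequences below are entirely our own choices (they satisfy the summability and relative-speed conditions imposed as Assumption C.9 below), and the Gaussian noise plays the role of the martingale differences M(i).

import numpy as np

rng = np.random.default_rng(0)
x, y, z = 1.0, 1.0, 1.0
for n in range(1, 200_001):
    a_n = 1.0 / (1.0 + n ** 0.6)   # fastest (largest) stepsize for x
    b_n = 1.0 / (1.0 + n ** 0.8)   # intermediate stepsize for y
    c_n = 1.0 / (1.0 + n)          # slowest (smallest) stepsize for z
    x += a_n * ((y + z - x) + 0.1 * rng.standard_normal())   # f(x, y, z) = y + z - x
    y += b_n * ((z - y) + 0.1 * rng.standard_normal())       # g(x, y, z) = z - y
    z += c_n * (-z + 0.1 * rng.standard_normal())            # h(x, y, z) = -z
print(x, y, z)   # all three iterates approach 0, the equilibrium of the limiting ODEs

Here x tracks its equilibrium λ1(y, z) = y + z on the fastest timescale, y tracks λ2(z) = z on the intermediate one, and z drifts to the globally stable point z* = 0 on the slowest.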
+It is instructive to compare the stochastic update algorithms from Equations 28 to 30 with the following o.d.e., +˙x(t) = 1 +af(x(t), y(t), z(t)), +˙y(t) = 1 +b g(x(t), y(t), z(t)), +˙z(t) = 1 +c h(x(t), y(t), z(t)), +in the limit that a, b, c → 0 and a = o(b), c = o(b). +We impose the following assumptions. +Assumption C.8. f and g is L-Lipschitz map for some 0 < L < ∞ and h is bounded. +Assumption C.9. +� +n +an = +� +n +bn = +� +n +cn = ∞, +� +n +(a2 +n + b2 +n + c2 +n) < ∞, and bn = o(an), cn = o(bn). +Assumption C.10. For i = 1, 2, 3 and n ∈ N+, {M (i) +n } is a martingale differeence sequence with respect to the increasing +family of σ-fields Fn. Furthermore, there exists some K > 0, such that for i = 1, 2, 3 and n ∈ N+, +E[∥M (i) +n+1∥2|Fn] ≤ K(1 + ∥xn∥2 + ∥yn∥2 + ∥zn∥2). +Assumption C.11. supn(∥xn∥ + ∥yn∥ + ∥zn∥) < ∞, a.s.. + +Single-Trajectory Distributionally Robust Reinforcement Learning +Assumption C.12. For each y ∈ Rk and z ∈ Rp, ˙x(t) = f(x(t), y, z) has a globally asymptotically stable equilibrium +λ1(y, z), where λ1 : Rk+p → Rd is a L-Lipschitz map for some L > 0. +Assumption C.13. For each z ∈ Rp, ˙y(t) = g(λ1(y(t), z), y(t), z) has a globally asymptotically stable equilibrium λ2(z), +where λ2 : Rp → Rk is a L-Lipschitz map for some L > 0. +Assumption C.14. ˙z(t) = h(λ1(z(t)), λ2(z(t)), z(t)) has a globally asymptotically stable equilibrium z⋆. +Assumptions C.8, C.9, C.10 and C.11 are necessary for the a.s. convergence for each timescale itself. Moreover, Assump- +tion C.11 itself requires Assumptions like C.8, C.9, C.10, with an extra assumption like Assumption C.5. Instead, we need +to prove the boundedness for each timescale, thus the three timescales version is as follow +Assumption C.15. The ODE +˙z(t) = f∞(x(t), y, z) +˙y(t) = g∞(λ1(y(t), z), y(t), z) +˙z(t) = h∞(λ1(z(t)), λ2(z(t)), z(t)) +all have the origin as their globally asymptotically stable equilibrium for each y ∈ Rk and z ∈ Rp, where +f∞ = lim +d→∞ +f(dx) +d +, +g∞ = lim +d→∞ +g(dx) +d +, and h∞ = lim +d→∞ +h(dx) +d +. +We have the following results, which appears as a three timescales extension of Lemma 6.1 in (Borkar, 2009) and serves as a +auxiliary lemma for the our a.s. convergence. +Lemma C.16. Under the assumptions C.8, C.9, C.10 and C.11. (xn, yn, zn) → {λ′ +1(z), λ′ +2(z), z : z ∈ Rp} a.s.. +Proof. Rewrite equations 29 and 30 as +yn+1 = yn + an +� +ϵ1,n + M (2)′ +n+1 +� +zn+1 = zn + an +� +ϵ2,n + M (3)′ +n+1 +� +, +where ϵ1,n = bn +an g(xn, yn, zn), ϵ2,n = cn +an h(xn, yn, zn), M (2)′ +n+1 = bn +an M (2) +n+1, M (3)′ +n+1 = cn +an M (3) +n+1. Note that ϵ1,n, ϵ2,n → 0 +as n → ∞. Consider them as the special case in the third extension in Section 2.2 in (Borkar, 2009) and then we can +conclude that (xn, yn, zn) converges to the internally chain transitive invariant sets of the o.d.e., +˙x(t) = h(x(t), y(t), z(t)) +˙y(t) = 0 +˙z(t) = 0, +which implies that (xn, yn, zn) → {λ′ +1(y, z), y, z : y ∈ Rk, z ∈ Rp}. +Rewrite Equation 30 again as +zn+1 = zn + bn +� +ϵ′ +2,n + M (3)′′ +n+1 +� +, +where ϵ′ +2,n = cn +bn h(xn, yn, zn) and M (3)′′ +n+1 = cn +bn M (3) +n+1. We use the same extension again and can conclude that (xn, yn, zn) +converges to the internally chain transitive invariant sets of the o.d.e., +˙y(t) = g(λ′ +1(y(t)), y(t), z(t)) +˙z(t) = 0. +Thus (xn, yn, zn) → {λ1(y), λ2(z), z : z ∈ Rp}. +Theorem C.17. Under the assumptions C.8 to C.15, (xn, yn, zn) → (λ1(z∗), λ2(z∗), z∗). + +Single-Trajectory Distributionally Robust Reinforcement Learning +Proof. 
Let t(0) = 0 and t(n) = �n−1 +i=0 ci for n ≥ 1. Define the piecewise linear continuous function ˜z(t), t ≥ 0 +where ˜z(t(n)) = zn and ˜z(t) = +t(n+1)−t +t(n+1)−t(n)zn+1 + +t−t(n) +t(n+1)−t(n)zn for t ∈ [t(n), t(n + 1)] with any n ∈ N. Let +ψn = �n−1 +i=0 ciM (3) +i+1, n ∈ N+. For any t ≥ 0, denote [t] = max{s(n) : s(n) ≤ t}. Then for n, m ≥ 0, we have +˜z(t(n + m)) = ˜z(t(n)) + +m−1 +� +k=1 +cn+kh(xn+k, yn+k, zn+k) + (ψm+n+1 − ψn) += ˜z(t(n)) + +� t(n+m) +t(n) +h(λ1(z(s)), λ2(z(s)), z(s))ds ++ +� t(n+m) +t(n) +(h(λ1(z([s])), λ2(z([s])), z([s])) − h(λ1(z(s)), λ2(z(s)), z(s)))ds ++ +m−1 +� +k=0 +cn+k(h(xn+k, yn+k, zn+k) − h(λ1(zn+k), λ2(zn+k), zn+k)) ++ (ψn+m+1 − ψn). +(31) +We further define zt(n)(t) as the trajectory of ˙z(t) = g(λ1(z(t)), λ2(z(t)), z(t)) with zt(n)(t(n)) = ˜z(t(n)). +zt(n)(t(n + m)) = ˜z(t(n)) + +� t(n+m) +t(n) +h(λ1(zt(n)(s)), λ2(zt(n)(s)), zt(n)(s))ds. +(32) +Taking the difference between Equation 31 and the Equation 32 we have +|˜z(t(n + m)) − zt(n)(t(n + m))| += +m−1 +� +k=0 +cn+k(h(λ1(˜z(t + k)), λ2(˜z(t + k)), ˜z(t + k)) − h(λ1(z(t(n + k))), λ2(z(t(n + k))), z(t(n + k)))) +� +�� +� ++ | +� t(n+m) +t(n) +(h(λ1(z([t])), λ2(z([t])), z([t])) − h(λ1(z(s)), λ2(z(s)), z(s)))ds| +� +�� +� +I ++ | +m−1 +� +k=1 +cn+k(h(xn+k, yn+k, zn+k) − h(λ1(zn+k), λ2(zn+k), zn+k))| +� +�� +� +II ++ |ψn+m+1 − ψn| +� +�� +� +III +. +We analyze the I term. For notation simplicity we ignore the supsript t(n). +|h(λ1(z([t])), λ2(z([t])), z([t])) − h(λ1(z(t)), λ2(z(t)), z(t))| += |(h(λ1(z([t])), λ2(z([t])), z([t])) − h(λ1(z([t])), λ2(z([t])), z(t)))| ++ |(h(λ1(z([t])), λ2(z([t])), z(t)) − h(λ1(z([t])), λ2(z([t])), z([t])))| += |(h(λ1(z([t])), λ2(z([t])), z([t])) − h(λ1(z([t])), λ2(z(t)), z(t)))| ++ |h(λ1(z([t])), λ2(z([t])), z(t)) − h(λ1(z([t])), λ2(z([t])), z(t))| ++ |(h(λ1(z([t])), λ2(z([t])), z(t)) − h(λ1(z([t])), λ2(z([t])), z([t])))|. +(33) +By the Lipschitzness of the h we have +∥h(x) − h(0)∥ ≤ L∥x∥, +which implies +∥h(x)∥ ≤ ∥h(0)∥ + L∥x∥. + +Single-Trajectory Distributionally Robust Reinforcement Learning +∥zt(n)(t)∥ ≤ ∥˜z(s)∥ + +� t +s +∥h(zt(n)(s))∥ds +≤ ∥˜z(s)∥ + +� t +s +(∥h(0)∥ + L∥zt(n)(s)∥)ds +≤ (∥˜z(s)∥ + ∥h(0)∥T) + L +� t +s +∥zt(n)(s)∥ds. +By Gronwall’s inequality (Lemma C.2), we have +∥zt(n)(t)∥ ≤ (C + ∥h(0)∥T)eLT , +∀t ∈ [t(n), t(n + m)]. +Thus for all t ∈ [t(n), t(n + m)], we have +∥h(λ1(zt(n)(t)), λ2(zt(n)(t)), zt(n)(t))∥ ≤ CT := ∥h(0)∥ + L(C + ∥h(0)∥T)eLT < ∞, a.s.. +For any k ∈ [m − 1] and t ∈ [t(n + k), t(n + k + 1)], +∥zt(n)(t) − zt(n)(t(n + k))∥ ≤ ∥ +� t +t(n+k) +h(λ1(zt(n)(s)), λ2(zt(n)(s)), zt(n)(s))ds∥ +≤ CT (t − t(n + k)) +≤ CT a(n + k), +where the last inequality is from the construction of {t(n) : n ∈ N+}. Finally we can conclude +∥ +� t(n+m) +t(n) +(h(λ1(z([s])), λ2(z([s])), z(s)) − h(λ1(z([s])), λ2(z([s])), z([s])))ds∥ +≤ +� t(n+m) +t(n) +L∥z(s) − z([s])∥ds += L +m−1 +� +k=0 +� t(n+k−1) +t(n+k) +∥z(s) − z(t(n + k))∥ds +≤ CT L +m−1 +� +k=0 +c2 +n+k +≤ CT L +∞ +� +k=0 +c2 +n+k → 0, a.s.. +For the III term, it converges to zero from the martingale convergence property. +Subtracting equation 31 from 32 and take norms, we have +∥˜z(t(n + m)) − zt(n)(t(n + m))∥ +≤ L +m−1 +� +i=0 +cn+i∥˜z(t(n + i)) − zt(n)(t(n + i))∥ ++ CT L +� +k≥0 +c2 +n+k + sup +k≥0 +∥δn,n+k∥, a.s.. +Define KT,n = CT L � +k≥0 c2 +n+k + supk≥0∥δn,n+k∥. Note that KT,n → 0 a.s. n → ∞. Let ui = ∥˜x(t(n + i)) − +xt(n)(t(n + i))∥. Thus, above inequality becomes +um ≤ KT,n + L +m−1 +� +i=0 +cn+iui. 
+ +Single-Trajectory Distributionally Robust Reinforcement Learning +Thus the above inequality becomes +z(t(n + m)) ≤ KT,n + L +m−1 +� +k=0 +ckz(t(n + k)). +Note that u0 = 0 and �m−1 +i=0 bi ≤ T, then using the discrete Gronwall lemma (Lemma C.1) we have +sup +0≤i≤m +ui ≤ KT,neLT . +Following the similar logic as in Lemma 1 in (Borkar, 2009), we can extend the above result to the case ∥˜z(t)−zt(n)(t)∥ → 0 +where t ∈ [0, T]. +Then using the proof of Theorem 2 of Chapter 2 in (Borkar, 2009), we get zn → z∗ a.s. and thus by Lemma C.16 the proof +can be concluded. +D. Convergence Condition +D.1. DR Q-Learning with χ2 divergence +We define the filtration generated by the historical trajectory, +Fn = σ({(st, at, s′ +t, rt)}t∈[n−1], sn, an). +For notation simplicity and the following analysis, we ignore the (sn, an) dependence and rewrite the three timescales +algorithms as follows: for (sn, an) we update as follow +Zn+1,1 = Zn,1 + ζ1(n)[f1(Zn,1, Zn,2, ηn, Qn) + M (1) +n+1], +(34) +Zn+1,2 = Zn,2 + ζ1(n)[f2(Zn,1, Zn,2, ηn, Qn) + M (2) +n+1], +(35) +βn+1 = Γβ [ηn + ζ2(n)f3(Zn,1, Zn,2, ηn, Qn)] , +(36) +Qn+1 = Qn + ζ3(n)[f4(Zn,1, Zn,2, ηn, Qn)]. +(37) +We first proceed by first identifying the terms in Equation 34 and 35 and studying the corresponding ODEs +˙Q(t) = 0, +˙η(t) = 0, +˙Z1(t) = f1(Z1(t), Z2(t), η(t), Q(t)). +˙Z2(t) = f2(Z1(t), Z2(t), η(t), Q(t)). +As f1 and f2 is in fact irrelavant to the Z2 and Z1, we analyze their equilibria seperately. +For ODE 34 and each ηn ∈ R, Qn ∈ S × A → R, it is easy to know there exists a unique global asymtotically stable +equilibrium Z⋆ +n,1 = λ1(η, y) = Es′∼P (·|sn,an)[(ηn − yn)+]. Similarly, For ODE 35 and each ηn ∈ R, Qn ∈ S × A → R, +there exists a unique global asymtotically stable equilibrium Z⋆ +n,2 = λ2(η, y) = Es′∼P (·|sn,an)[(ηn − yn)2 ++]. +Second, M (1) +n+1 = (ηn − yn)+ − Es′∼P (·|sn,an)[(ηn − yn)+] and M (2) +n+1 = (ηn − yn)2 ++ − Es′∼P (·|sn,an)[(ηn − yn)2 ++]. +Since ∥yn∥∞ ≤ ∥Qn∥∞ and (x − y)2 ++ ≤ x2 + y2 for any x, y, we have, +E[∥M (1) +n+1∥2|Fn] += E[∥(Zn,1 − Qn)+ − Es′∼P (·|sn,an)[(ηn − yn)+∥]Fn] +≤ 2(1 + ∥Zn,1∥2 + ∥Zn,2∥2 + ∥Qn∥2 + ∥ηn∥2). + +Single-Trajectory Distributionally Robust Reinforcement Learning +Similarly, we can conclude that E[∥M (2) +n+1∥2|Fn] ≤ K(1 + ∥Zn,1∥2 + ∥Zn,2∥2 + ∥Qn∥2 + ∥ηn∥2) for some K > 0. +Next we analyze the second loop. +˙Q(t) = 0 +˙η(t) = f3(λ1(η(t), Q(t)), λ2(η(t), Q(t)), η(t), Q(t)), +where +f3(λ1(η, Q), λ2(η, Q), η, Q) = η − +� +ρ + 1 · λ1(η, Q) +λ2(η, Q) − 1. +It is easy to know that the global convergence point is η⋆(t) = arg maxη{σχ2(Q, η)}. +Finally we arrive to the outer loop, i.e., +˙Q(t) = f4(λ1(Q(t)), λ2(Q(t)), λ3(Q(t)), Q(t)). +By using the dual form of χ2 DRO problem, we know that this is equivilant to +˙Q(t) = r + γ inf +P ∈P EP [max +a′ Q(s′, a′)] − Q(t), +for ambiguity set using χ2 divergence . +Denote H(t) = r + γ infP ∈P EP [maxa′ Q(s′, a′)] and thus we can rewrite the above ODE as +˙Q(t) = H(t) − Q(t). +Following , we consider its infity version, i.e., H∞(t) = limc→∞ H(ct)/c. +˙Q(t) = γ inf +P ∈P EP [max +a′ Q(s′, a′)] − Q(t). +This is a contraction by Theorem 3.2 in (Iyengar, 2005). By the proof in Section 3.2 in (Borkar & Meyn, 2000), we know +the contraction can lead to the global unique equilibrium point in the ode. Thus we finish verifying all the conditions in +Section C.2, which can lead to the following result. +Theorem D.1. (Z1,n, Z2,n, βn, Qn) → (λ1(Q∗), λ2(Q∗), λ3(Q∗), Q∗). +D.2. 
D.2. DR Q-Learning with KL divergence

We define the filtration generated by the historical trajectory,
\[
\mathcal{F}_n = \sigma\big(\{(s_t, a_t, s'_t, r_t)\}_{t \in [n-1]}, s_n, a_n\big).
\]
For notational simplicity, we suppress the $(s_n, a_n)$ dependence and rewrite the three-timescale algorithm as follows: for the visited pair $(s_n, a_n)$ we update
\[
\begin{aligned}
Z_{n+1,1} &= Z_{n,1} + \zeta_1(n)\big[f_1(Z_{n,1}, Z_{n,2}, \beta_n, Q_n) + M^{(1)}_{n+1}\big], &\text{(38)}\\
Z_{n+1,2} &= Z_{n,2} + \zeta_1(n)\big[f_2(Z_{n,1}, Z_{n,2}, \beta_n, Q_n) + M^{(2)}_{n+1}\big], &\text{(39)}\\
\beta_{n+1} &= \big(\beta_n + \zeta_2(n)\, f_3(Z_{n,1}, Z_{n,2}, \beta_n, Q_n)\big)_+, &\text{(40)}\\
Q_{n+1} &= Q_n + \zeta_3(n)\big[f_4(Z_{n,1}, Z_{n,2}, \beta_n, Q_n)\big]. &\text{(41)}
\end{aligned}
\]
We first identify the terms in Equations 38 and 39 and study the corresponding ODEs
\[
\dot Q(t) = 0, \qquad \dot\beta(t) = 0, \qquad \dot Z_1(t) = f_1(Z_1(t), Z_2(t), \beta(t), Q(t)), \qquad \dot Z_2(t) = f_2(Z_1(t), Z_2(t), \beta(t), Q(t)).
\]
As $f_1$ and $f_2$ are in fact independent of $Z_2$ and $Z_1$, respectively, we analyze their equilibria separately. For ODE 38 and each $\beta_n \in \mathbb{R}$, $Q_n : \mathcal{S} \times \mathcal{A} \to \mathbb{R}$, there exists a unique globally asymptotically stable equilibrium $Z^\star_{n,1} = \lambda_1(\beta, y) = \mathbb{E}_{s' \sim P(\cdot|s,a)}[e^{-y/\beta}]$. Similarly, for ODE 39 and each $\beta_n \in \mathbb{R}$, $Q_n : \mathcal{S} \times \mathcal{A} \to \mathbb{R}$, there exists a unique globally asymptotically stable equilibrium $Z^\star_{n,2} = \lambda_2(\beta, y) = \mathbb{E}_{s' \sim P(\cdot|s,a)}[y e^{-y/\beta}]$.

Second,
\[
M^{(1)}_{n+1} = e^{-y_n(s'_n)/\beta_n(s,a)} - \mathbb{E}_{s' \sim P(\cdot|s_n,a_n)}\big[e^{-y_n/\beta_n(s,a)}\big]
\quad\text{and}\quad
M^{(2)}_{n+1} = y_n\, e^{-y_n(s'_n)/\beta_n(s,a)} - \mathbb{E}_{s' \sim P(\cdot|s_n,a_n)}\big[y_n\, e^{-y_n/\beta_n(s,a)}\big].
\]
Since $|e^{-y_n/\beta_n(s,a)}| \le 1$ and $|\mathbb{E}_{s' \sim P(\cdot|s,a)}[e^{-y_n/\beta_n(s,a)}]| \le 1$, we have
\[
\mathbb{E}\big[\|M^{\mathrm{KL},(1)}_{n+1}\|^2 \mid \mathcal{F}_n\big] \le 4 \le 4\big(1 + \|Z_{1,n}\|^2 + \|Z_{2,n}\|^2 + \|Q_n\|^2 + |\beta_n|^2\big).
\]
Similarly, we can conclude that $\mathbb{E}[\|M^{(2)}_{n+1}\|^2 \mid \mathcal{F}_n] \le 4(1 + \|Z_{n,1}\|^2 + \|Z_{n,2}\|^2 + \|Q_n\|^2 + |\beta_n|^2)$.

Next we analyze the second loop,
\[
\dot Q(t) = 0, \qquad \dot\beta(t) = f_3(\lambda_1(\beta(t), Q(t)), \lambda_2(\beta(t), Q(t)), \beta(t), Q(t)),
\]
where
\[
f_3(\lambda_1(\beta, Q), \lambda_2(\beta, Q), \beta, Q) = \Gamma_\beta\Big[\beta - \delta + \log\big(\lambda_1(\beta)\big) + \beta^{-1}\,\frac{\lambda_2(\beta)}{\lambda_1(\beta)}\Big].
\]
The globally asymptotically stable equilibrium of this ODE is $\beta^\star = \arg\max_\beta \{\sigma_{\mathrm{KL}}(Q, \beta)\}$.

Finally, we arrive at the outer loop,
\[
\dot Q(t) = f_4(\lambda_1(Q(t)), \lambda_2(Q(t)), \lambda_3(Q(t)), Q(t)).
\]
By the dual form of the DRO problem with KL divergence, this is equivalent to
\[
\dot Q(t) = r + \gamma \inf_{P \in \mathcal{P}} \mathbb{E}_P\big[\max_{a'} Q(s', a')\big] - Q(t)
\]
for an ambiguity set defined by the KL divergence. Denote $H(Q) = r + \gamma \inf_{P \in \mathcal{P}} \mathbb{E}_P[\max_{a'} Q(s', a')]$, so the above ODE can be rewritten as $\dot Q(t) = H(Q(t)) - Q(t)$. Following (Borkar & Meyn, 2000), we consider its scaled limit $H_\infty(Q) = \lim_{c \to \infty} H(cQ)/c$, which gives
\[
\dot Q(t) = \gamma \inf_{P \in \mathcal{P}} \mathbb{E}_P\big[\max_{a'} Q(s', a')\big] - Q(t).
\]
The corresponding operator is a contraction by Theorem 3.2 in (Iyengar, 2005). By the proof in Section 3.2 of (Borkar & Meyn, 2000), the contraction implies a unique globally asymptotically stable equilibrium of the ODE. Thus we finish verifying all the conditions in Section C.2, which leads to the following result.

Theorem D.2. $(Z_{n,1}, Z_{n,2}, \beta_n, Q_n) \to (\lambda_1(Q^*), \lambda_2(Q^*), \lambda_3(Q^*), Q^*)$.
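The KL dual used above can also be sanity-checked numerically. The short sketch below evaluates the standard dual $\inf_{P:\,\mathrm{KL}(P\|P_0)\le\delta} \mathbb{E}_P[y] = \sup_{\beta \ge 0}\{-\beta \log \mathbb{E}_{P_0}[e^{-y/\beta}] - \beta\delta\}$ by a simple grid search over $\beta$; the sample values, the grid and the function names are illustrative and not part of the algorithm above.

import numpy as np

def log_mean_exp(x):
    # Numerically stable log of the sample mean of exp(x).
    m = np.max(x)
    return m + np.log(np.mean(np.exp(x - m)))

def kl_robust_value(y_samples, delta, betas=np.linspace(1e-3, 50.0, 2000)):
    # sup_{beta >= 0} -beta * log E[exp(-y/beta)] - beta * delta  (KL-DRO dual)
    y = np.asarray(y_samples, dtype=float)
    vals = [-b * log_mean_exp(-y / b) - b * delta for b in betas]
    return float(np.max(vals))

y = np.array([0.5, 1.0, 1.5, 2.0])    # illustrative samples of max_a' Q(s', a')
print(kl_robust_value(y, delta=0.0))   # close to the nominal mean of y
print(kl_robust_value(y, delta=0.5))   # strictly smaller: worst case over the KL ball

For $\delta = 0$ the dual value approaches the nominal expectation, and the inner maximization over $\beta$ is exactly the quantity that the $\beta$-update in Equation 40 tracks on its own timescale.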
E. Convergence of DRO Differential Q-learning

E.1. Convergence Analysis

Assumption E.1. The MDP M has a single communicating class, that is, each state in M is accessible from every other state under some deterministic stationary policy.

Assumption E.2. The solution $q$ of Equation 25 is unique up to an additive constant.

Assumption E.3. $\sum_n a_n = \sum_n b_n = \sum_n c_n = \infty$, $\sum_n (a_n^2 + b_n^2 + c_n^2) < \infty$, $b_n = o(a_n)$, $c_n = o(b_n)$.

Assumption E.4. Let $[\cdot]$ denote the integer part of $(\cdot)$. For $x \in (0, 1)$,
\[
\sup_i \frac{c_{[xi]}}{c_i} < \infty
\qquad\text{and}\qquad
\frac{\sum_{j=0}^{[yi]} c_j}{\sum_{j=0}^{i} c_j} \to 1
\]
uniformly in $y \in [x, 1]$.

Assumption E.5. There exists $\Delta > 0$ such that
\[
\liminf_{n \to \infty} \frac{\nu(n, s, a)}{n + 1} \ge \Delta, \quad \text{a.s., for all } s \in \mathcal{S},\ a \in \mathcal{A}.
\]
Furthermore, for all $x > 0$, let
\[
N(n, x) = \min\Big\{ m > n : \sum_{i=n+1}^{m} c_i \ge x \Big\};
\]
then the limit
\[
\lim_{n \to \infty} \frac{\sum_{i=\nu(n,s,a)}^{\nu(N(n,x),s,a)} c_i}{\sum_{i=\nu(n,s',a')}^{\nu(N(n,x),s',a')} c_i}
\]
exists a.s. for all $s, s', a, a'$.

Assumptions E.1, E.4 and E.5 are required in the non-robust differential Q-learning (Wan et al., 2021), while Assumption E.2 holds when the ambiguity set is compact (Wang et al., 2023), which is guaranteed by the construction of our ambiguity set. Assumptions E.4 and E.5 can be satisfied by common choices of learning rates, e.g., $\zeta_1(n) = \frac{1}{1+n^{0.6}}$, $\zeta_2(n) = \frac{1}{1+n^{0.8}}$ and $\zeta_3(n) = \frac{1}{1+n}$; we refer to (Wan et al., 2021) for a detailed discussion of their implications.

Our proof of a.s. convergence is similar to the non-robust version, except for some key steps. Moreover, we only need to prove the convergence of the slowest loop, as the other loops have been shown to satisfy the requirements of our three-timescale framework. Thus we only prove the remaining parts below.

We can rewrite the synchronous DRO Q-learning algorithm as
\[
Q_{n+1} = Q_n + \zeta_3(n)\big(T(Q_n) - Q_n\big),
\qquad\text{where}\qquad
T(Q_n)(s, a) = \inf_{P \in \mathcal{P}} \mathbb{E}_{s' \sim P}\Big[r(s, a) + \max_{a \in \mathcal{A}} Q_n(s', a)\Big].
\]
Denote $g(Q(s, \cdot)) = \max_{a \in \mathcal{A}} Q(s, a)$. First define the operators $T$, $T_1$ and $T_2$ as follows:
\[
T(Q)(s, a) := r(s, a) + \inf_{P \in \mathcal{P}} \mathbb{E}_{s' \sim P_{s,a}}\, g(Q(s', \cdot)),
\qquad
T_1(Q) := T(Q) - r^* e,
\qquad
T_2(Q) := T(Q) - f(Q)\, e.
\]
Consider the two ODEs
\[
\dot y_t = T_1(y_t) - y_t, \tag{42}
\]
and
\[
\dot x_t = T_2(x_t) - x_t. \tag{43}
\]
Note that by the properties of $T_1$ and $T_2$, the right-hand sides of Equations 42 and 43 are Lipschitz, and thus both ODEs are well-posed.

Next we prove the following two lemmas about the properties of Equations 42 and 43.

Lemma E.6. Let $\bar y$ be an equilibrium point of the ODE defined in (42). Then $\|y_t - \bar y\|_\infty$ is nonincreasing, and $y_t \to y^*$ for some equilibrium point $y^*$ of (42) that may depend on $y_0$.

Proof. First, it is easy to verify that the operator $T$ is nonexpansive. For any $(s, a) \in \mathcal{S} \times \mathcal{A}$,
\[
\begin{aligned}
|T(Q)(s, a) - T(Q')(s, a)|
&= \Big| \inf_{P \in \mathcal{P}} \mathbb{E}_{s' \sim P}\Big[r(s, a) + \max_{a \in \mathcal{A}} Q(s', a)\Big] - \inf_{P \in \mathcal{P}} \mathbb{E}_{s' \sim P}\Big[r(s, a) + \max_{a \in \mathcal{A}} Q'(s', a)\Big] \Big| \\
&\le \sup_{P \in \mathcal{P}} \Big| \mathbb{E}_{s' \sim P}\Big[\max_{a \in \mathcal{A}} Q(s', a) - \max_{a \in \mathcal{A}} Q'(s', a)\Big] \Big| \\
&\le \sup_{P \in \mathcal{P}} \mathbb{E}_{s' \sim P} \max_{a \in \mathcal{A}} \big| Q(s', a) - Q'(s', a) \big| \\
&\le \|Q - Q'\|_\infty.
\end{aligned}
\]
Since the choice of $(s, a)$ was arbitrary, we conclude that $\|T(Q) - T(Q')\|_\infty \le \|Q - Q'\|_\infty$. Lemma E.6 is then a direct special case of Theorem 4.1 in (Borkar & Soumyanatha, 1997).

Lemma E.7. (43) has a unique equilibrium at $q^*$.

Lemma E.7 can be proved using the same argument as in the proof of Lemma 3.2 in (Abounadi et al., 2001).

We then show the relation between $x_t$ and $y_t$ using the following lemma. It shows that the difference between $x_t$ and $y_t$ is a vector with identical elements, and that this vector satisfies a new ODE.

Lemma E.8. Let $x_0 = y_0$. Then $x_t = y_t + z_t e$, where $z_t$ satisfies the ODE $\dot z_t = -u z_t + (r^* - f(y_t))$.

Proof. The identity $x_t = y_t + z_t e$ can be proven using the same argument as Lemma 3.3 in (Abounadi et al., 2001), as that proof only relies on the fact that $T_1$ is nonexpansive, which is an immediate consequence of the nonexpansiveness of $T$. The ODE $\dot z_t = -u z_t + (r^* - f(y_t))$ can be derived using the same argument as Lemma B.3 in (Wan et al., 2021), since $T_1$ still satisfies $T_1(x_t) - T_1(y_t) = T_1(y_t + z_t e) - T_1(y_t) = T_1(y_t) + z_t e - T_1(y_t) = z_t e$.

With the above lemmas, we have

Lemma E.9. $q^*$ is the unique globally asymptotically stable equilibrium of (43).

Proof.
The proof is the same as the proof of Lemma B.4 in (Wan et al., 2021) as the immediate result of Lemma E.8. + diff --git a/n9FKT4oBgHgl3EQfFy0J/content/tmp_files/load_file.txt b/n9FKT4oBgHgl3EQfFy0J/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..990df5760e3fe39ac500008f4bddf7cb75a97e71 --- /dev/null +++ b/n9FKT4oBgHgl3EQfFy0J/content/tmp_files/load_file.txt @@ -0,0 +1,1325 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf,len=1324 +page_content='Single-Trajectory Distributionally Robust Reinforcement Learning Zhipeng Liang * 1 Xiaoteng Ma * 2 Jose Blanchet 3 Jiheng Zhang 1 Zhengyuan Zhou 4 Abstract As a framework for sequential decision-making, Reinforcement Learning (RL) has been regarded as an essential component leading to Artificial General Intelligence (AGI).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' However, RL is of- ten criticized for having the same training envi- ronment as the test one, which also hinders its application in the real world.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' To mitigate this problem, Distributionally Robust RL (DRRL) is proposed to improve the worst performance in a set of environments that may contain the un- known test environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' Due to the nonlinear- ity of the robustness goal, most of the previous work resort to the model-based approach, learn- ing with either an empirical distribution learned from the data or a simulator that can be sampled infinitely, which limits their applications in sim- ple dynamics environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' In contrast, we at- tempt to design a DRRL algorithm that can be trained along a single trajectory, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=', no repeated sampling from a state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' Based on the standard Q-learning, we propose distributionally robust Q-learning with single trajectory (DRQ), and its average-reward variant named differential DRQ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' We provide asymptotic convergence guarantees and experiments for both settings, demonstrating their superiority in the perturbed environments against the non-robust ones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' Introduction Reinforcement Learning (RL) is a machine learning paradigm studying the sequential decision problem.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' In par- ticular, it aims to learn an optimal policy that maximizes the cumulative return in interacting with the environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' While remarkable progress has been shown recently (Silver et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=', 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' Mnih et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=', 2015;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' Vinyals et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=', 2019), a key Equal contribution 1Hong Kong University of Science and Technology 2Tsinghua University 3Stanford Univer- sity 4New York University.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/n9FKT4oBgHgl3EQfFy0J/content/2301.11721v1.pdf'} +page_content=' Correspondence to: Zhipeng Liang > ∆Ez. +Used for swap gate protocol. (b) ∆Ez >> J(ϵ). Used for +CROT/CPHASE gate protocols. +qubit. Both quantum gates can be realized in quantum dots +when the magnetic field gradient is much larger than exchange +splitting, i.e. ∆Ez ≫ J(ϵ). In this case, energy eigenvalues of the +effective Hamiltonian either shift or stay the same depending on +eigenstate. When eigenstates are anti-parallel, eigenenergies shift +in energy with increasing ϵ, while for parallel eigenstates, they +remain the same as shown in Fig. 1b. In all cases, the eigenstates +remain in their initial states. Implementation of CPHASE utilizes +the difference in eigenenergies of anti-parallel states before and +after applying detuning ϵ. When an adiabatic pulse is applied +to the system (keeping the system in an eigenstate of the +Hamiltonian), anti-parallel states accumulate phases relative +to parallel states, which is equivalent to CPHASE up to single- +qubit gates. +Implementation of CROT takes advantage of phase shifts +of anti-parallel states in a different way. As shown in Fig. 1b, +when the system is adiabatically pulsed with the detuning ϵ0, +the shifts in eigenenergies of anti-parallel states cause transition +frequencies between |↓↓⟩-|↑↓⟩ and |↓↑⟩-|↑↑⟩ to deviate from +each other. As we have different resonant frequencies for each +transition [5], we can separately drive the desired transition. +This equips us with CROT in our native quantum gate set. Note +that the use of adiabaticity in CPHASE and CROT may depend +on the used control schemes and the underlying material. +B. Different Implementations of Two-Qubit Gates +While the introduced two-qubit gates are in principle possible, +there are numerous difficulties in exploiting the potential +of having different two-qubit gates in one platform. First, +preferences of some two-qubit gates for device characteristics +are in contrast with those of other two-qubit gates. Second, +gates require adiabatic pulses to suppress unwanted rotations +caused by residual interactions. 
Adiabatic control allows the eigenstates to remain in their original states by the adiabatic theorem, thereby resulting in high gate fidelities. However, adiabatic control typically increases the gate operation time. Hence, the spin-qubit platform can have higher gate fidelity two-qubit gate realizations with a longer gate time and two-qubit gate realizations with a lower gate fidelity but also shorter gate time. As we show, the characteristics of such realizations could be exploited to increase the overall circuit fidelity. Third, residual interactions occasionally persist despite the adiabatic pulse and degrade gate fidelities. For example, if one performs a swap in the presence of a specific Zeeman energy difference ∆Ez, an unwanted rotation caused by ∆Ez will deteriorate the performance of the swap operation.
These challenges have been tackled by numerous papers [6–8] to either increase the fidelity or decrease the gate time. Notably, [6] implemented all two-qubit gates discussed above with high fidelities and short gate times within a single spin-qubit platform.

Table I: Investigated gate durations and fidelities
                   SU(2)   CZ      CZdb   CROT   SWAPd   SWAPc
Fidelity           0.999   0.999   0.99   0.994  0.99    0.999
Duration D0 [ns]   30      152     67     660    19      89
Duration D1 [ns]   30      151     7      660    9       13

The authors used a geometric gate to decrease gate time and increase gate fidelity. Also, they utilized composite pulses to further suppress unwanted rotations. As a result, they achieved gate times D0 in table I. However, a scaled-up spin-qubit platform may require different materials or driving mechanisms. We therefore also investigate gate time D1 with the fidelities depicted in table I, where CZdb is a diabatic CZ gate, SWAPd is a diabatic swap gate and SWAPc is a swap gate realized by composite pulses.
III. CIRCUIT ADAPTATION TECHNIQUES
Quantum computing can be realized on different quantum modalities with distinct hardware limitations. Each quantum technology has its specific basis gate sets that consist of entangling gates and single-qubit gates. For example, the basis gate sets of IBM quantum hardware include a single two-qubit gate (CNOT), while for spin qubit devices, the target of this paper, the entangling gates are typically CPHASE or CROT. However, a quantum circuit is usually generated for an abstract gate set or may have been generated for a different hardware modality than the actual target hardware modality. Therefore, it needs to be adapted to the given quantum hardware containing only the basis gate sets. Here, we introduce several commonly used circuit translation techniques.
Direct Basis Translation. This technique translates the quantum gates from the source basis defined by the input circuit to the target basis according to a pre-defined equivalence library. The equivalence library includes various ways of decomposing a gate to its equivalent implementations. For example, a CNOT gate can be decomposed equivalently to a set of single-qubit gates in conjunction with one of the three different two-qubit basis gates: CZ, iSWAP, or Rzx. If these gates occur in a target quantum circuit, they can each be replaced by the CNOT basis gate and single-qubit gates as defined in the equivalence library and its gate substitution rules.
Template Optimization. This is a circuit optimization technique, typically used for reducing the error or duration of quantum operations [9], that consists of three individual steps.
+First, the template input to the technique must be constructed. +A template is generally defined to be a quantum circuit that +evaluates the identity operation. The template consists of +two parts that are functionally inverses but typically have +different basis gates, some examples of which are shown in +Figure. 3(a)-(d). In the second step, template matching is +performed, which aims at finding all the maximal matches +of the input templates in the target circuit [10–12]. In a final +step, template substitution is performed. During this step, the +matched part of the original subcircuit is replaced by the inverse +of the unmatched part if the unmatched part of the template has +a lower cost. The cost can be evaluated with various metrics, +such as gate implementation cost, error rate, or gate duration. +As opposed to direct basis substitution, where non-basis gates +are simplified by targeting basis gates through the equivalence +library, template optimization offers the flexibility of converting +between different basis gates and optimizing certain circuit +patterns more effectively. +Unitary Decomposition. This is the process of translating a +given unitary matrix to a sequence of single and two-qubit gates. + +This is also known as circuit synthesis. It can be particularly +useful for applications composed of arbitrary unitary gates, such +as quantum volume circuit [13], to convert the unitary matrices +to hardware basis gate sets and generate a synthesized circuit. +Several methods have been proposed to reduce the number +of gates in the synthesized circuit, such as cosine-sine matrix +decomposition (CSD) [14], quantum Shannon decomposition +(QSD) [15], and KAK decomposition [16]. +Suitability for Quantum Circuit Adaptation. Various cir- +cuit adaptation techniques introduced in this section are defined +as transpilation passes in a quantum compiler and each of +them works well independently. However, if each technique is +applied separately during the circuit transpilation process, the +performance of the quantum circuit adaptation is limited. +For direct basis translation and the unitary decomposition +method, the adaptation is only performed with one two-qubit +basis gate which lacks flexibility when a combination of both +would improve the quantum circuit even further. While it is +possible for template optimization to adapt a quantum circuit +to various two-qubit basis gates, only a local solution can +be determined for one template at a time [17]. The same +result quality as possible with a global optimization relying +on evaluating multiple templates at the same time can not +be reached. A clear method for combining these approaches +in an optimized way remains elusive, and is the subject of +our investigation. Our proposed method incorporates the above +approaches in a circuit adaptation technique such that an adapted +quantum circuit with high fidelity is obtained. The variations +obtained are specifically evaluated for translating a quantum +circuit to a spin-qubit device with multiple two-qubit basis gates. +IV. SAT-BASED QUANTUM CIRCUIT ADAPTATION +The steps of the proposed method for adapting a quantum +circuit from one quantum hardware modality or an abstract +gate set to a target quantum hardware modality are shown in +Fig. 2. First, the quantum circuit is preprocessed to yield a +set of blocks along with their dependencies and their cost in +terms of fidelity and duration. Then, every specified substitution +rule is evaluated on the quantum circuit. 
The preprocessed +quantum circuit, the specified substitution rules and the defined +objective function are used to construct an SMT model in a third +step. The SMT model is then input to a SMT solver [18] that +computes an assignment to the model variables such that the +objective function is optimized. The assignment is then used to +derive an adapted quantum circuit using a selection of specified +substitution rules. +Figure 2: Workflow of the developed quantum circuit adaptation +method for an arbitrary input quantum circuit with preprocessing +steps (a), substitution rule evaluations (b) as well as SMT model +construction and SMT solving (c). +The following sections describe the applied preprocessing +steps as well as the evaluation of specified adaptations, then show +the construction of the SMT model and give an explicit example +for adapting a quantum circuit designed for IBM quantum +computers [19] to the spin qubit hardware modality specified +in [6, 20]. +A. Preprocessing +Preprocessing consists of three steps that are applied suc- +cessively. First, the input quantum circuit is partitioned into +two-qubit blocks that contain gates interacting on the same qubit +pair. The order of the blocks is given by a block dependency +graph that contains each block b as a vertex and an edge +a = (b′, b) if block b′ must be computed before block b. +In a second step, each basis gate of the source quantum hard- +ware modality is substituted by basis gates of the target hardware +modality. The basis gate substitution can be performed using +an equivalence library that can be generated manually [21] or +automatically [22]. +Finally, the cost of each block after basis gate substitution +is evaluated in terms of block duration and block fidelity. The +block duration is the length of the critical path in the block, i.e. +the time the target quantum computer needs to execute the block. +The block fidelity is defined as the product of each gate fidelity +in the block. Basis gate translation provides a naive adaptation +that is used as a common reference cost in subsequent steps. +B. Evaluation of Substitution Rules +Each specified substitution rule is evaluated on the input +quantum circuit and then used to define an SMT model in a +subsequent step. During the evaluation of a substitution rule, +the set of substituted gates ps, the set of substitution gates gs, +the affected blocks bs and the cost of the substitution ws are +determined for each substitution s applicable to the quantum +circuit. +A substitution rule can be a gate equivalency, quantum circuit +equivalency or a decomposition method that decomposes a +block to the basis gates of the target hardware modality. The +substitution rules can be defined manually by a domain expert +as a set of quantum circuit or gate equivalencies [17], derived +automatically [22] for the basis gates of the target hardware +modality, or be part of a general decomposition method such +as the KAK decomposition [16]. +Quantum circuit or gate equivalency substitution rules can be +evaluated in polynomial runtime [17]. Evaluating substitution +rules based on decomposition requires one to first compute +the unitary matrix of each block and then evaluate the cost +of a decomposition. Determining the unitary matrix of n-qubit +block requires a runtime exponential in the number of qubits n. +However, for small n, in our case n = 2, the runtime overhead +is not significant. +C. 
SMT Model for Quantum Circuit Adaptation +In this section we describe how the data from the preprocess- +ing steps and the substitution rule evaluation are used to generate +an SMT model that yields a quantum circuit adaptation from a +source hardware modality to a target hardware modality. The +developed SMT model consists of Boolean variables, constraints +and the definition of linear objective functions. An SMT solver +computes an assignment to the variables of the developed SMT +model that is satisfying all constraints and that is optimal with +respect to the defined model assumptions. In this work the Z3 +solver software was used as an SMT solver [18]. + +1) SMT Model Variables: The developed SMT model for a +quantum circuit with S substitutions, B blocks and a dependency +graph G = (V, A) with vertices V and edges A consists of +variables: +• C = {c1, ..., c|S|}: the set of chosen substitutions for the +quantum circuit adaptation, i.e. the resulting quantum +circuit adaptations only contain a substitution s if cs +evaluates to true. +• E = {e1, ..., e|B|}: the set of block starting times, i.e. the +time at which the computation of a block is started on the +target hardware modality. +• D = {d1, ..., d|B|}: the set of block duration times +• F = {f1, ..., f|B|}: the set of block fidelity +2) SMT Model Constraints: The assignment to sets C, E and +D must be constrained to yield a valid and optimized quantum +circuit adaptation. First, a substitution may only be chosen in a +quantum circuit adaptation, if it does not substitute the same +gates as another chosen substitution: +¬cs ∨ ¬cs′ +∀s, s′ ∈ S : ps ∩ ps′ ̸= ∅, +(1) +where ps and ps′ are the sets of quantum gates that will +be substituted by substitutions s and s′, respectively. The +symbol ¬ refers to the logic negation while the symbol ∧ (∨) +corresponds to the logic conjunction (disjunction). In addition, +the computation of a block in a quantum circuit must obey the +dependency defined in graph G. Thus, the computation of a +block b on a target quantum computer may only start if the +computation of any preceding block b′ has been concluded: +eb ≥ eb′ + db′ +∀b, b′ ∈ B : ab′,b ∈ A, +(2) +where ab′,b is an edge in the block dependency graph G, eb′ +is the time step at which the computation of block b′ and +db′ is the duration of computing block b′. Finally, the block +duration time and block fidelity must be set depending on the +chosen substitutions in the quantum circuit adaptation. The +block duration time db of a block b is set by: +db := D(b) + +� +s∈S′ +D(s) ∧ cs, +(3) +where D(·) returns the duration of a block or quantum gate, and +D(·) gives the reduction in duration incurred by a substitution. +The duration reduction is defined by +D(s) = +� +g∈gs +D(g) – +� +p∈ps +D(p), +(4) +where gs is the set of substitution quantum gates and ps is the +set of substituted quantum gates of substitution s. Likewise, the +fidelity fb of a block b is determined by: +fb := log(F(b)) + +� +s∈S +F(s) ∧ cs +(5) +where F(·) returns the fidelity of a quantum gate or of a +block given by the reference adaptation determined during +preprocessing steps, and F(·) gives the improvement in fidelity +incurred by a substitution. The improvement in fidelity is defined +by: +F(s) = +� +g∈gs +log(F(g)) – +� +p∈ps +log(F(p)), +(6) +where gs are the substitution quantum gates and ps are the +substituted quantum gates of substitution s. +Note that the developed model does not contain functions D(·) +and log(F(·)). 
Instead, the function value of every substitution +s, quantum gate g and block b in the reference adaptation is +computed before the SMT model is constructed. Furthermore, +the developed model only registers one duration and start +time for a two-qubit block. This introduces single-qubit gate +ambiguities when minimizing the qubit idle time or quantum +circuit duration if the duration (a single-qubit gates) on one +qubit is different to the other in a template or block. +3) Objective Functions: Lastly, we describe the objective +functions investigated in this work. The objective function +provided to the SMT solver is crucial for improving the quantum +circuit adaptation, i.e. improving the probability of computing +the correct result on a noisy, near-term hardware modality. We +investigated objective functions that improve the fidelity of the +adapted quantum circuit, qubit idle time of the adapted quantum +circuit and a combination of both. The qubit idle time has been +observed to be a source of error [23] that should be minimized +in a quantum circuit. We assume the state of a qubit to decay +during idle time, i.e. the state of a qubit is unaffected by the +idle time with probability: +e–d/T, +(7) +where d is the duration during which a qubit is idle and T is +the coherence time of the target hardware modality. The fidelity +objective of the adapted quantum circuit is defined by: +max +� +b +fb, +(8) +where fb is defined as in Eq. 5. The qubit idle time in the +adapted quantum circuit is optimized by: +max –Q · D – � +b db +T +, +(9) +where Q is the number of qubits and D is the total circuit +duration. We also combine these objectives as a product: +max +� +b +log(fb) – Q · D – � +b db +T +. +(10) +4) Determining the SMT Quantum Circuit Adaptation: After +an SMT solver computed an assignment to the SMT model +variables, a substitution S is applied to the target quantum +circuit if cs is set by the SMT solver. A substitution s is applied +to a quantum circuit by substituting quantum gates ps in the +quantum circuit with gs. A quantum gate in the original quantum +circuit is substituted by the basis translation performed in the +preprocessing step if the quantum gate is not part of any chosen +substitution. +Example: Adapting a Quantum Circuit from IBM Backends to +Spin Qubits +In this section we describe the adaptation of a quantum +circuit given in the basis of an IBM quantum computer +based on superconducting qubits [19] to a quantum circuit +suitable for computation on a spin-qubit quantum computer [6]. +Figure 4 shows the quantum circuit and table I (D0) shows the +characteristics of the quantum gates supported by the spin-qubit +quantum computer used in this example [6]. The corresponding +spin-qubit quantum computer supports arbitrary single-qubit +gates in SU(2), a two-qubit controlled-Z (CZ) gate that is also +used for KAK decompositions, two-qubit conditional rotation +gates along an arbitrary axis and two native realizations of swap + +gates (swapd and swapc). We do not consider the diabatic CZ +gate in this example. The swap gate realization swapd requires +less time than the swap gate swapc but also has a lower gate +fidelity than swapc. Depending on the structure of the quantum +circuit, the swap gate swapd or the swap gate swapc may be +preferable in a quantum circuit adaptation, e.g. for reducing +qubit idle time. 
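Before turning to the worked example, it may help to see the model of the previous section written out programmatically. The sketch below uses the Z3 Python bindings (the solver referenced above [18]) and encodes constraints (1)–(3) and (5) together with the fidelity objective (8) for a two-block toy circuit; the blocks, the dependency, the substitution entries and all duration and fidelity numbers are illustrative placeholders rather than data from this paper.

import math
from z3 import Optimize, Bool, Real, Not, Or, If, sat, is_true

# Illustrative preprocessed data: reference duration [ns] and fidelity per block,
# one dependency edge, and candidate substitutions with their deltas.
blocks = {0: {"dur": 800.0, "fid": 0.98}, 1: {"dur": 500.0, "fid": 0.99}}
deps = [(0, 1)]  # block 0 must be computed before block 1
subs = {
    "s0": {"block": 0, "d_dur": -390.0, "fid_factor": 1.002, "gates": {0, 1}},
    "s1": {"block": 0, "d_dur": +240.0, "fid_factor": 1.005, "gates": {0}},
    "s2": {"block": 1, "d_dur": -120.0, "fid_factor": 0.998, "gates": {4, 5}},
}

opt = Optimize()
c = {s: Bool(f"c_{s}") for s in subs}    # chosen substitutions (set C)
e = {b: Real(f"e_{b}") for b in blocks}  # block start times (set E)
d = {b: Real(f"d_{b}") for b in blocks}  # block durations (set D)
f = {b: Real(f"f_{b}") for b in blocks}  # block log-fidelities (set F)

# Constraint (1): substitutions that touch the same gates are mutually exclusive.
names = sorted(subs)
for i, si in enumerate(names):
    for sj in names[i + 1:]:
        if subs[si]["gates"] & subs[sj]["gates"]:
            opt.add(Or(Not(c[si]), Not(c[sj])))

# Constraint (2): a block may only start once all predecessor blocks have finished.
for b in blocks:
    opt.add(e[b] >= 0)
for b_prev, b_next in deps:
    opt.add(e[b_next] >= e[b_prev] + d[b_prev])

# Constraints (3) and (5): duration and log-fidelity under the chosen substitutions.
for b, props in blocks.items():
    d_delta = sum(If(c[s], subs[s]["d_dur"], 0.0) for s in subs if subs[s]["block"] == b)
    f_delta = sum(If(c[s], math.log(subs[s]["fid_factor"]), 0.0) for s in subs if subs[s]["block"] == b)
    opt.add(d[b] == props["dur"] + d_delta)
    opt.add(f[b] == math.log(props["fid"]) + f_delta)

# Objective (8): maximize the total log-fidelity of the adapted circuit.
opt.maximize(sum(f[b] for b in blocks))
if opt.check() == sat:
    model = opt.model()
    print({s: is_true(model[c[s]]) for s in subs})

Objectives (9) and (10) can be encoded in the same way by additionally introducing the total circuit duration and the coherence time T of the target hardware.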
+(a) (diabatic) Conditional-Z +(b) Conditional-Rotation (CR) +(c) Direct swap gate +(d) Composite swap gate +(e) KAK decomposition using CZ and single-qubit gates +Figure 3: Substitution rules for adapting quantum circuits +generated for IBM backends [19] to spin-based systems [24] +The results of the quantum circuit adaptation steps are shown +in figure 4. First, the target quantum circuit is partitioned into +blocks and the basis gate translation (see figure 3a) is performed +to determine a reference cost for each block. The substitution +rules described in figure 3 are evaluated on the target quantum +circuit in the next step. This yields ten substitution matches, +Figure 4: Quantum circuit adaptation for an example quantum +circuit given in the IBM backend basis. Continuous lines indicate +quantum gates substituted by the same substitution rule. An +orange line corresponds to a KAK decomposition, a blue line to +conditional rotation gates, and violet and black lines to different +swap gate realizations. +where the KAK decomposition (orange line) could be applied +once to each block, the conditional-rotation (blue line) could be +applied in block 1 and block 3 once, and swapd (violet line) as +well as swapc (black line) could each be applied once in block +1 and block 2. The duration of block 1 is set in our example +by: +d1 = 965 + (573 – 965) ∧ c0 + (660 – 422) ∧ c1 + (19 – 543) ∧ c2 ++ (67 – 543) ∧ c3, +(11) +where 965ns is the reference block duration given by the +basis translation, c0, c1, c2, c3 corresponds to whether the KAK +decomposition (c0), the conditional-rotation substitution (c1), +the direct swap substitution swapd (c2) or the composite swap +substitution swapc (c3) is applied. Characteristics of the other +blocks are computed in an analogous way and input to the SMT +model construction (see section IV). Depending on the chosen +objective function different substitutions may be applied during +the quantum circuit adaptation. In this example we assume that +the quantum circuit duration should be minimized. Using a +KAK decomposition, the duration of block 1 would be reduced +by 392ns, the conditional-rotation quantum gate would increase +the duration by 238ns, swapd reduces the duration by 524 and +swapc reduces the duration by 476ns. Substitutions s0, s2 and +s3 as well as substitutions s0 and s1 are incompatible since they +substitute the same set of quantum gates. Thus, applying KAK +substitution s0 reduces the duration of block 1 the most. +The values and equations for the block duration and depen- +dency are entered as an SMT model into an SMT solver whose +result informs the quantum circuit adaptation. +V. RESULTS +In this section, we evaluate the developed SMT model on the +introduced semiconducting spin hardware modality. We investi- +gated the increase in circuit and Hellinger fidelity, and decrease +in qubit idle for quantum volume circuits [13] and random +circuits containing gates from the templates in Fig. 3 with up +to 4 qubits and a quantum circuit depth of up to 160. Two +gate characteristics D0, D1 as given in table I were evaluated. +The developed SMT model is compared to employing a KAK +decomposition using CZ and diabatic CZ gates, template +optimization with two objectives targeting the quantum circuit +fidelity and qubit idle time, and a direct basis translation that +replaces each non-supported two-qubit quantum gate in the +quantum circuit with a CZ gate. The SMT solver was invoked +with the fidelity objective SAT F given in Eq. 
8, the idle time +objective SAT R given in Eq. 9, and the combined objective +SAT P as given in Eq. 10. The quantum circuit determined by +direct basis translation is chosen as a baseline for comparison +in the following results. Before employing the quantum circuit +adaptation technique, Qiskit [21] was used to transpile the target +quantum circuit into one compliant with the hardware topology. +A. Circuit Fidelity Increase and Qubit Idle Time Decrease +In this section, we evaluated the impact of quantum circuit +adaptation on the decrease in qubit idle time, and the change in +quantum circuit fidelity as given by the product of individual +gate fidelities. The fidelity and idle time of the quantum circuit +as determined by direct basis translation is chosen as a baseline +for comparison in the following results. As depicted in Fig. 5 the +SMT approach yields the largest improvement in quantum circuit +fidelity of up to 15% over all quantum circuits. Performing +quantum circuit adaptation by only using KAK decompositions +based on (diabatic) CZ gates decreases the overall quantum +circuit fidelity since the KAK decomposition may introduce +additional single-qubit gates compared to template optimization. +In addition, the diabatic CZ gate has a lower gate fidelity +as the baseline basis translation using CZ gates (see table I). +In figure 6 the decrease in qubit idle time of the respective +quantum circuits is depicted for the studied quantum circuit +adaptation techniques. The SMT based approaches yield the +highest decrease in qubit idle time for all but the smallest +quantum circuit. +B. Hellinger Fidelity and Qubit Idle Time +Here, we investigate the impact of the developed approach +on the qubit idle time and the Hellinger fidelity obtained by +performing quantum circuit simulation subject to errors incurred +by a depolarization channel that corresponds to the individual +gate fidelities and thermal relaxation that corresponds to the +qubit idle time [21]. In accordance to [6], we assumed T2 = 2900 +ns and a T1 time that is three orders of magnitudes larger for +thermal relaxation errors. Figure 5 shows the decrease in qubit +idle time on the y-axis and the change in Hellinger fidelity + +Figure 5: Change in quantum circuit fidelity as given by the +product of gate fidelities. +Figure 6: Decrease in qubit idle time yielded by the analyzed +quantum circuit adaptation techniques. +on the x-axis. The developed SMT approaches yield adapted +quantum circuits with the highest decrease in qubit idle time and +the largest increase in Hellinger fidelity. The evaluated quantum +circuit adaptation techniques based on the KAK decomposition +and template optimization occasionally yield good results but +lead to worse results than the developed SMT approaches in +most cases. +VI. CONCLUSION +In this work, we demonstrated the capability of semiconduct- +ing spins to support multiple two-qubit gates, yielding multiple +universal quantum gate sets that can be used during quantum +circuit adaptation to yield quantum circuits with a higher circuit +fidelity or smaller qubit idle time. +The developed SMT approach is particularly well suited to +deal with multiple two-qubit gates and yields a decrease in qubit +Figure 7: Change in Hellinger fidelity and qubit idle time for +the studied quantum circuit adaptation techniques. +idle time of up to 87% and an increase in Hellinger fidelity of +up to 40% compared to direct basis translation. 
Future research +could include the development of suitable heuristics and the +consideration of n-qubit gates. +ACKNOWLEDGMENT +This work arose as a project from the Qiskit Advocate Men- +torship Program in the Fall of 2021. The authors acknowledge +the use of IBM Quantum Services for this work. This work was +partially funded by the Carl Zeiss foundation. +REFERENCES +[1] +J.-L. Brylinski and R. Brylinski, “Universal quantum gates,” 2001. +[2] +M. J. Bremner, C. M. Dawson, J. L. Dodd, A. Gilchrist, A. W. Harrow, +et al., “Practical scheme for quantum computation with any two-qubit +entangling gate,” Phys. Rev. Lett., vol. 89, p. 247 902, 24 2002. +[3] +R. Hanson, L. P. Kouwenhoven, J. R. Petta, S. Tarucha, and L. M. K. +Vandersypen, “Spins in few-electron quantum dots,” Rev. Mod. Phys., +vol. 79, pp. 1217–1265, 4 2007. +[4] +D. Loss and D. P. DiVincenzo, “Quantum computation with quantum +dots,” Physical Review A, vol. 57, no. 1, p. 120, 1998. +[5] +D. M. Zajac, A. J. Sigillito, M. Russ, F. Borjans, J. M. Taylor, et al., +“Resonantly driven CNOT gate for electron spins,” Science, vol. 359, +no. 6374, pp. 439–442, 2018. +[6] +L. Petit, M. Russ, G. H. Eenink, W. I. Lawrie, J. S. Clarke, et al., +“Design and integration of single-qubit rotations and two-qubit gates +in silicon above one kelvin,” Communications Materials, vol. 3, no. 1, +pp. 1–7, 2022. +[7] +C. Zhang, T. Chen, S. Li, X. Wang, and Z.-Y. Xue, “High-fidelity +geometric gate for silicon-based spin qubits,” Phys. Rev. A, vol. 101, +p. 052 302, 5 2020. +[8] +X. Wang, L. S. Bishop, E. Barnes, J. P. Kestner, and S. D. Sarma, +“Robust quantum gates for singlet-triplet spin qubits using composite +pulses,” Phys. Rev. A, vol. 89, p. 022 310, 2 2014. +[9] +N. Earnest, C. Tornow, and D. J. Egger, “Pulse-efficient circuit transpi- +lation for quantum applications on cross-resonance-based hardware,” +Phys. Rev. Res., vol. 3, p. 043 088, 4 2021. +[10] +N. Abdessaied, M. Soeken, R. Wille, and R. Drechsler, “Exact template +matching using boolean satisfiability,” in IEEE 43rd International +Symposium on Multiple-Valued Logic, 2013, pp. 328–333. +[11] +D. M. Miller, D. Maslov, and G. W. Dueck, “A transformation +based algorithm for reversible logic synthesis,” in Proceedings Design +Automation Conference, 2003, pp. 318–323. +[12] +M. M. Rahman, G. W. Dueck, and J. D. Horton, “An algorithm for +quantum template matching,” ACM Journal on Emerging Technologies +in Computing Systems, vol. 11, no. 3, pp. 1–20, 2014. +[13] +A. W. Cross, L. S. Bishop, S. Sheldon, P. D. Nation, and J. M. Gambetta, +“Validating quantum computers using randomized model circuits,” +Physical Review A, vol. 100, no. 3, p. 032 328, 2019. +[14] +M. M¨ott¨onen, J. J. Vartiainen, V. Bergholm, and M. M. Salomaa, +“Quantum circuits for general multiqubit gates,” Physical review letters, +vol. 93, no. 13, p. 130 502, 2004. +[15] +V. V. Shende, S. S. Bullock, and I. L. Markov, “Synthesis of quantum- +logic circuits,” IEEE Transactions on Computer-Aided Design of +Integrated Circuits and Systems, vol. 25, no. 6, pp. 1000–1010, 2006. +[16] +Y. Nakajima, Y. Kawano, and H. Sekigawa, “A new algorithm for +producing quantum circuits using KAK decompositions,” Quantum +Info. Comput., vol. 6, no. 1, pp. 67–80, 2006. +[17] +R. Iten, R. Moyard, T. Metger, D. Sutter, and S. Woerner, “Exact and +practical pattern matching for quantum circuit optimization,” ACM +Transactions on Quantum Computing, vol. 3, no. 1, 2022. +[18] +L. d. Moura and N. 
Bjørner, “Z3: An efficient SMT solver,” in +International conference on Tools and Algorithms for the Construction +and Analysis of Systems, Springer, 2008, pp. 337–340. +[19] +IBM Quantum, https://quantum-computing.ibm.com/, 2021. +[20] +J. Yoneda, K. Takeda, T. Otsuka, T. Nakajima, M. R. Delbecq, et al., +“A quantum-dot spin qubit with coherence limited by charge noise and +fidelity higher than 99.9%,” Nature nanotechnology, vol. 13, no. 2, +pp. 102–106, 2018. +[21] +Qiskit: +An +open-source +framework +for +quantum +computing, +www.qiskit.org, 2021. +[22] +J. Pointing, O. Padon, Z. Jia, H. Ma, A. Hirth, et al., “Quanto: +Optimizing quantum circuits with automatic generation of circuit +identities,” 2021. +[23] +P. Jurcevic, A. Javadi-Abhari, L. S. Bishop, I. Lauer, D. F. Bogorin, +et al., “Demonstration of quantum volume 64 on a superconducting +quantum computing system,” Quantum Science and Technology, vol. 6, +no. 2, p. 025 020, 2021. +[24] +L. Petit, H. Eenink, M. Russ, W. Lawrie, N. Hendrickx, et al., “Universal +quantum logic in hot silicon qubits,” Nature, vol. 580, no. 7803, pp. 355– +359, 2020. + +DoD +1 +SAT P +SAT F +SAT R +KAK db-CZ +KAK CZ +Template Opt. R +Template + Opt. FDoD +1 +SAT P +SAT F +SAT R +KAK db-CZ +KAK CZ +Template Opt. R +Template + Opt. F \ No newline at end of file diff --git a/o9FKT4oBgHgl3EQfGS3Z/content/tmp_files/load_file.txt b/o9FKT4oBgHgl3EQfGS3Z/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..282f6e7a944463fa88be0651f7989e67070ff090 --- /dev/null +++ b/o9FKT4oBgHgl3EQfGS3Z/content/tmp_files/load_file.txt @@ -0,0 +1,509 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf,len=508 +page_content='SAT-Based Quantum Circuit Adaptation Sebastian Brandhofer,1 Jinwoong Kim,2 Siyuan Niu,3 Nicholas T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Bronn4 1 Institute of Computer Architecture and Computer Engineering and Center for Integrated Quantum Science and Technology, University of Stuttgart, Stuttgart, Germany, e-mail: sebastian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='brandhofer@iti.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='uni-stuttgart.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='de 2 Applied Physics, Delft University of Technology, Delft, The Netherlands, e-mail: kjw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='kim@gmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='com 3 LIRMM, University of Montpellier, Montpellier, France, e-mail: siyuan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='niu@lirmm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='fr 4 IBM Quantum, IBM TJ Watson Research Center, Yorktown Heights, NY, USA, e-mail: ntbronn@us.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='ibm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='com Abstract—As the nascent field of quantum computing develops, an increasing number of quantum hardware modalities, such as superconducting electronic circuits, semiconducting spins, trapped ions, and neutral atoms, have become available for performing quantum computations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' These quantum hardware modalities ex- hibit varying characteristics and implement different universal quantum gate sets that may e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' contain several distinct two-qubit quantum gates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Adapting a quantum circuit from a, possibly hardware-agnostic, universal quantum gate set to the quantum gate set of a target hardware modality has a crucial impact on the fidelity and duration of the intended quantum computation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' However, current quantum circuit adaptation techniques only apply a specific decomposition or allow only for local improve- ments to the target quantum circuit potentially resulting in a quantum computation with less fidelity or more qubit idle time than necessary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' These issues are further aggravated by the multiple options of hardware-native quantum gates rendering multiple universal quantum gates sets accessible to a hardware modality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' In this work, we developed a satisfiability modulo theories model that determines an optimized quantum circuit adaptation given a set of allowed substitutions and decompositions, a target hardware modality and the quantum circuit to be adapted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' We further discuss the physics of the semiconducting spins hardware modality, show possible implementations of distinct two-qubit quantum gates, and evaluate the developed model on the semiconducting spins hardware modality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Using the developed quantum circuit adaptation method on a noisy simulator, we show the Hellinger fidelity could be improved by up to 40% and the qubit idle time could be decreased by up to 87% compared to alternative quantum circuit adaptation techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' I.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' INTRODUCTION Currently, numerous noisy quantum hardware modalities for quantum computing satisfy the requirements for universal quantum computing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' However, no as-realized hardware modality has achieved fault-tolerance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Therefore, while universal, current quantum modalities are limited in circuit depth by gate fidelity and ultimately by the qubit coherence times.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Therefore circuit adaptation strategies that reduce gate count, transform operations into equivalent ones with less incurred error, and reduce qubit idle time, are highly relevant for quantum advantage on near- term noisy quantum hardware.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' An arbitrary quantum computation may be decomposed into a set of single- and two-qubit gates [1, 2], however hardware modalities typically suffer higher infidelity from the two-qubit gates than single-qubit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Hence quantum circuit adaptation for hardware modalities that admit multiple two-qubit gates can be used to improve overall result quality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Here we choose the modality of spin qubits in semiconductor dots as an example that admits three two-qubit gates depending on parameter regime and control to study a satisfiability modulo theories (SMT) model for quantum circuit adaptation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' In this work, we begin with a discussion of the physics of spin qubits in semiconducting quantum dots in section II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Depending on the control applied, each of three two-qubit gates swap, CPHASE, and CROT can be realized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' The latter two of which are universal together with single-qubit rotations while the swap operation is useful for limited-connectivity modalities such as spins.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' This is followed by a discussion of the costs associated with each operation, such as infidelity and duration, that will inform our SMT model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' After an overview of circuit adaptation techniques in section III, we provide a rigorous description of our SMT model and an example for illustration in section IV.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' TWO-QUBIT GATES IN SEMICONDUCTOR SPIN QUBITS We first demonstrate how to realize two-qubit gates from an effective Hamiltonian based on the physics of spin qubits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' A summary of the differing fidelities and durations of such operations follows, preparing the case for our satisfiability modulo theories model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' In general, quantum dots are formed by isolating electrons used as qubits from other electrons in the reservoir.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' In a two- dimensional electron gas, separation is achieved by applying voltages to barrier and plunger gates, which deplete charge carriers in the semiconducting layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Manipulation of qubits is also achieved via the barrier and plunger gates [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Dynamics of Two-qubit Gates Eigenstates and eigenvalues of an effective Hamiltonian dictate the dynamics of a two-qubit system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' These are depicted in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' 1a and Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' 1b for different parameter regimes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Here, the effective Hamiltonian leads to three widely-used two-qubit gates: the swap gate, controlled phase gate (CPHASE), and controlled rotation gate (CROT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Note that π-angle rotations for CPHASE and CROT result in CZ and CNOT gates, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Following the proposal in [4], Swap gates can be realized by harnessing natural spin exchange interaction, which is most pronounced when the exchange splitting dominates the magnetic field gradient, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' J(ϵ) ≫ ∆Ez.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Energy eigenvalues of the effective Hamiltonian are shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' 1a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' As detuning increases, the eigenstates switch from |↑ ↓⟩ and |↓ ↑⟩ to |T0⟩ = |↑↓⟩ + |↓↑⟩ and |S⟩ = |↑↓⟩ – |↓↑⟩ (up to normalization), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' At detuning ϵ0, the exchange splitting J(ϵ0) between eigenstates are large enough to induce two-qubit operation within coherence time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Swap gate consists of three control signals: (1) increase detuning ϵ0 diabatically preserves the initial state, for example |↓↑⟩ = |T0⟩ – |S⟩ in the new eigenbasis, (2) precess the initial state in a projected two-qubit Bloch sphere defined by |S⟩ and |T0⟩ for time τop = π/J(ϵ0) corresponding to a π-rotation in the z-axis, and (3) decrease detuning diabatically to the initial configuration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Common choices for two-qubit entangling gates for spin qubits are CPHASE, which changes the phase of the target qubit depending on the state of the control qubit, and CROT, which rotates the target qubit based on the state of the control arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='11725v1 [quant-ph] 27 Jan 2023 (a) (b) Figure 1: Eigenenergies in different regimes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' (a) J(ϵ) >> ∆Ez.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Used for swap gate protocol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' (b) ∆Ez >> J(ϵ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Used for CROT/CPHASE gate protocols.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' qubit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' Both quantum gates can be realized in quantum dots when the magnetic field gradient is much larger than exchange splitting, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/o9FKT4oBgHgl3EQfGS3Z/content/2301.11725v1.pdf'} +page_content=' ∆Ez ≫ J(ϵ).' 
Common choices of two-qubit entangling gates for spin qubits are CPHASE, which changes the phase of the target qubit depending on the state of the control qubit, and CROT, which rotates the target qubit based on the state of the control qubit. Both quantum gates can be realized in quantum dots when the magnetic field gradient is much larger than the exchange splitting, i.e. ∆Ez ≫ J(ϵ).

Figure 1: Eigenenergies in different regimes. (a) J(ϵ) ≫ ∆Ez, used for the swap gate protocol. (b) ∆Ez ≫ J(ϵ), used for the CROT/CPHASE gate protocols.

In this case, the energy eigenvalues of the effective Hamiltonian either shift or stay the same depending on the eigenstate. For anti-parallel eigenstates, the eigenenergies shift with increasing ϵ, while for parallel eigenstates they remain the same, as shown in Fig. 1b. In all cases, the eigenstates remain in their initial states. The implementation of CPHASE utilizes the difference in eigenenergies of the anti-parallel states before and after applying the detuning ϵ. When an adiabatic pulse is applied to the system (keeping the system in an eigenstate of the Hamiltonian), the anti-parallel states accumulate phases relative to the parallel states, which is equivalent to CPHASE up to single-qubit gates. The implementation of CROT takes advantage of the phase shifts of the anti-parallel states in a different way. As shown in Fig. 1b, when the system is adiabatically pulsed with the detuning ϵ0, the shifts in the eigenenergies of the anti-parallel states cause the transition frequencies between |↓↓⟩–|↑↓⟩ and |↓↑⟩–|↑↑⟩ to deviate from each other. Since each transition has a different resonant frequency [5], the desired transition can be driven separately. This equips us with CROT in our native quantum gate set. Note that the use of adiabaticity in CPHASE and CROT may depend on the control schemes used and the underlying material.

B. Different Implementations of Two-Qubit Gates

While the introduced two-qubit gates are possible in principle, there are numerous difficulties in exploiting the potential of having different two-qubit gates in one platform.
First, the device characteristics preferred by some two-qubit gates conflict with those preferred by other two-qubit gates. Second, the gates require adiabatic pulses to suppress unwanted rotations caused by residual interactions. Adiabatic control allows the eigenstates to remain in their original states by the adiabatic theorem, thereby resulting in high gate fidelities. However, adiabatic control typically increases the gate operation time. Hence, the spin-qubit platform can offer two-qubit gate realizations with higher gate fidelity but longer gate time, as well as realizations with lower gate fidelity but shorter gate time. As we show, the characteristics of such realizations can be exploited to increase the overall circuit fidelity. Third, residual interactions occasionally persist despite the adiabatic pulse and degrade the gate fidelities. For example, if one performs a swap in the presence of a specific Zeeman energy difference ∆Ez, an unwanted rotation caused by ∆Ez will deteriorate the performance of the swap operation.

These challenges have been tackled by numerous papers [6–8] that either increase the fidelity or decrease the gate time. Notably, [6] implemented all two-qubit gates discussed above with high fidelities and short gate times within a single spin-qubit platform.

Table I: Investigated gate durations and fidelities

                     SU(2)   CZ      CZdb    CROT    SWAPd   SWAPc
  Fidelity           0.999   0.999   0.99    0.994   0.99    0.999
  Duration D0 [ns]   30      152     67      660     19      89
  Duration D1 [ns]   30      151     7       660     9       13
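For later cost computations it is convenient to keep these characteristics in a small lookup structure; the snippet below is ours for illustration and simply restates Table I:

```python
# Table I restated as a lookup (illustrative; values copied from the table).
GATE_CHARACTERISTICS = {
    "SU(2)": {"fidelity": 0.999, "D0_ns": 30,  "D1_ns": 30},
    "CZ":    {"fidelity": 0.999, "D0_ns": 152, "D1_ns": 151},
    "CZdb":  {"fidelity": 0.99,  "D0_ns": 67,  "D1_ns": 7},
    "CROT":  {"fidelity": 0.994, "D0_ns": 660, "D1_ns": 660},
    "SWAPd": {"fidelity": 0.99,  "D0_ns": 19,  "D1_ns": 9},
    "SWAPc": {"fidelity": 0.999, "D0_ns": 89,  "D1_ns": 13},
}
```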
The authors used a geometric gate to decrease the gate time and increase the gate fidelity. They also utilized composite pulses to further suppress unwanted rotations. As a result, they achieved the gate times D0 in Table I. However, a scaled-up spin-qubit platform may require different materials or driving mechanisms. We therefore also investigate the gate times D1 with the fidelities given in Table I, where CZdb is a diabatic CZ gate, SWAPd is a diabatic swap gate, and SWAPc is a swap gate realized by composite pulses.

III. CIRCUIT ADAPTATION TECHNIQUES

Quantum computing can be realized on different quantum modalities with distinct hardware limitations. Each quantum technology has its specific basis gate set consisting of entangling gates and single-qubit gates. For example, the basis gate set of IBM quantum hardware includes a single two-qubit gate (CNOT), while for spin-qubit devices, the target of this paper, the entangling gates are typically CPHASE or CROT. However, a quantum circuit is usually generated for an abstract gate set, or may have been generated for a different hardware modality than the actual target. Therefore, it needs to be adapted to the given quantum hardware containing only the basis gate set. Here, we introduce several commonly used circuit translation techniques.
Direct Basis Translation. This technique translates the quantum gates from the source basis defined by the input circuit to the target basis according to a pre-defined equivalence library. The equivalence library includes various ways of decomposing a gate into its equivalent implementations. For example, a CNOT gate can be decomposed equivalently into a set of single-qubit gates in conjunction with one of three different two-qubit basis gates: CZ, iSWAP, or Rzx. Conversely, if these gates occur in a target quantum circuit, they can each be replaced by the CNOT basis gate and single-qubit gates as defined in the equivalence library and its gate substitution rules.
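A minimal Qiskit sketch of such a direct basis translation (ours, for illustration; it assumes a recent Qiskit version and uses the standard equivalence library shipped with the transpiler):

```python
# Illustrative sketch (not from the paper): translate a CNOT-based circuit
# into a CZ + single-qubit basis via Qiskit's standard equivalence library.
from qiskit import QuantumCircuit, transpile

qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)      # CNOT in the source basis

translated = transpile(qc, basis_gates=["cz", "rz", "sx", "x"], optimization_level=0)
print(translated.count_ops())    # e.g. a single 'cz' plus several 1-qubit gates
```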
Template Optimization. This is a circuit optimization technique, typically used for reducing the error or duration of quantum operations [9], that consists of three individual steps. First, the template input to the technique must be constructed. A template is generally defined to be a quantum circuit that evaluates to the identity operation. The template consists of two parts that are functional inverses of each other but typically have different basis gates; some examples are shown in Fig. 3(a)-(d). In the second step, template matching is performed, which aims at finding all maximal matches of the input templates in the target circuit [10–12]. In a final step, template substitution is performed. During this step, the matched part of the original subcircuit is replaced by the inverse of the unmatched part if the unmatched part of the template has a lower cost. The cost can be evaluated with various metrics, such as gate implementation cost, error rate, or gate duration. As opposed to direct basis substitution, where non-basis gates are simplified by targeting basis gates through the equivalence library, template optimization offers the flexibility of converting between different basis gates and optimizing certain circuit patterns more effectively.

Unitary Decomposition. This is the process of translating a given unitary matrix into a sequence of single- and two-qubit gates, also known as circuit synthesis. It is particularly useful for applications composed of arbitrary unitary gates, such as quantum volume circuits [13], to convert the unitary matrices to the hardware basis gate set and generate a synthesized circuit. Several methods have been proposed to reduce the number of gates in the synthesized circuit, such as cosine-sine matrix decomposition (CSD) [14], quantum Shannon decomposition (QSD) [15], and KAK decomposition [16].
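The following sketch (ours, for illustration; it is not the decomposition code used in the paper and assumes a recent Qiskit version) synthesizes an arbitrary two-qubit unitary into a CZ-plus-single-qubit basis, the kind of KAK-style synthesis referred to above:

```python
# Illustrative sketch (not from the paper): KAK-style synthesis of a random
# two-qubit unitary into CZ and single-qubit gates.
from qiskit import QuantumCircuit, transpile
from qiskit.quantum_info import random_unitary

u = random_unitary(4, seed=7)      # arbitrary two-qubit unitary
qc = QuantumCircuit(2)
qc.unitary(u, [0, 1], label="U")

synth = transpile(qc, basis_gates=["cz", "u"], optimization_level=1)
print(synth.count_ops())           # a handful of 'cz' and 'u' gates
```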
Suitability for Quantum Circuit Adaptation. The various circuit adaptation techniques introduced in this section are defined as transpilation passes in a quantum compiler, and each of them works well independently. However, if each technique is applied separately during the circuit transpilation process, the performance of the quantum circuit adaptation is limited. For direct basis translation and the unitary decomposition method, the adaptation is performed with only one two-qubit basis gate, which lacks flexibility when a combination of both would improve the quantum circuit even further. While template optimization can adapt a quantum circuit to various two-qubit basis gates, only a local solution can be determined for one template at a time [17]; the result quality achievable with a global optimization that evaluates multiple templates at the same time cannot be reached. A clear method for combining these approaches in an optimized way remains elusive and is the subject of our investigation. Our proposed method incorporates the above approaches into a circuit adaptation technique such that an adapted quantum circuit with high fidelity is obtained. The resulting variants are specifically evaluated for translating a quantum circuit to a spin-qubit device with multiple two-qubit basis gates.

IV. SAT-BASED QUANTUM CIRCUIT ADAPTATION

The steps of the proposed method for adapting a quantum circuit from one quantum hardware modality, or from an abstract gate set, to a target quantum hardware modality are shown in Fig. 2. First, the quantum circuit is preprocessed to yield a set of blocks along with their dependencies and their cost in terms of fidelity and duration. Then, every specified substitution rule is evaluated on the quantum circuit. In a third step, the preprocessed quantum circuit, the specified substitution rules, and the defined objective function are used to construct an SMT model. The SMT model is then passed to an SMT solver [18] that computes an assignment to the model variables such that the objective function is optimized. The assignment is then used to derive an adapted quantum circuit using a selection of the specified substitution rules.

Figure 2: Workflow of the developed quantum circuit adaptation method for an arbitrary input quantum circuit, with preprocessing steps (a), substitution rule evaluations (b), and SMT model construction and SMT solving (c).
The following sections describe the applied preprocessing steps and the evaluation of the specified adaptations, then show the construction of the SMT model, and give an explicit example of adapting a quantum circuit designed for IBM quantum computers [19] to the spin-qubit hardware modality specified in [6, 20].

A. Preprocessing

Preprocessing consists of three steps that are applied successively. First, the input quantum circuit is partitioned into two-qubit blocks that contain gates acting on the same qubit pair. The order of the blocks is given by a block dependency graph that contains each block b as a vertex and an edge a = (b′, b) if block b′ must be computed before block b. In a second step, each basis gate of the source quantum hardware modality is substituted by basis gates of the target hardware modality. The basis gate substitution can be performed using an equivalence library that can be generated manually [21] or automatically [22]. Finally, the cost of each block after basis gate substitution is evaluated in terms of block duration and block fidelity. The block duration is the length of the critical path in the block, i.e. the time the target quantum computer needs to execute the block. The block fidelity is defined as the product of the gate fidelities in the block. This basis gate translation provides a naive adaptation that is used as a common reference cost in subsequent steps.
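A minimal sketch of these per-block cost computations (ours, for illustration; the helper name and the simplified two-qubit critical-path model are assumptions, not code from the paper):

```python
# Illustrative sketch (not from the paper): reference duration (critical path)
# and fidelity (product of gate fidelities) of a two-qubit block.
import math

def block_cost(gates, duration_ns, fidelity):
    """gates: list of (gate_name, qubits) acting on one fixed qubit pair."""
    finish = {0: 0.0, 1: 0.0}                 # per-qubit finishing times
    log_fid = 0.0
    for name, qubits in gates:
        start = max(finish[q] for q in qubits)
        for q in qubits:
            finish[q] = start + duration_ns[name]
        log_fid += math.log(fidelity[name])
    return max(finish.values()), math.exp(log_fid)

# Example with the D0 characteristics of Table I (CNOT expressed as H-CZ-H).
duration_ns = {"su2": 30, "cz": 152}
fidelity = {"su2": 0.999, "cz": 0.999}
gates = [("su2", (1,)), ("cz", (0, 1)), ("su2", (1,))]
print(block_cost(gates, duration_ns, fidelity))   # (212.0, ~0.997)
```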
B. Evaluation of Substitution Rules

Each specified substitution rule is evaluated on the input quantum circuit and then used to define an SMT model in a subsequent step. During the evaluation of a substitution rule, the set of substituted gates ps, the set of substitution gates gs, the affected blocks bs, and the cost of the substitution ws are determined for each substitution s applicable to the quantum circuit. A substitution rule can be a gate equivalency, a quantum circuit equivalency, or a decomposition method that decomposes a block into the basis gates of the target hardware modality. The substitution rules can be defined manually by a domain expert as a set of quantum circuit or gate equivalencies [17], derived automatically [22] for the basis gates of the target hardware modality, or be part of a general decomposition method such as the KAK decomposition [16]. Quantum circuit or gate equivalency substitution rules can be evaluated in polynomial runtime [17]. Evaluating substitution rules based on decomposition requires one to first compute the unitary matrix of each block and then evaluate the cost of a decomposition. Determining the unitary matrix of an n-qubit block requires a runtime exponential in the number of qubits n. However, for small n, in our case n = 2, the runtime overhead is not significant.
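For the decomposition-based rules, the unitary of a two-qubit block can be obtained directly; the following sketch is ours for illustration and assumes a standard Qiskit installation:

```python
# Illustrative sketch (not from the paper): computing the 4x4 unitary of a
# two-qubit block, as needed when evaluating decomposition-based substitutions.
from qiskit import QuantumCircuit
from qiskit.quantum_info import Operator

block = QuantumCircuit(2)
block.h(0)
block.cx(0, 1)
block.rz(0.3, 1)

u = Operator(block)    # exponential in the qubit count, cheap for n = 2
print(u.dim)           # (4, 4)
```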
C. SMT Model for Quantum Circuit Adaptation

In this section we describe how the data from the preprocessing steps and the substitution rule evaluation are used to generate an SMT model that yields a quantum circuit adaptation from a source hardware modality to a target hardware modality. The developed SMT model consists of Boolean variables, constraints, and the definition of linear objective functions. An SMT solver computes an assignment to the variables of the developed SMT model that satisfies all constraints and is optimal with respect to the defined model assumptions. In this work, the Z3 solver software was used as the SMT solver [18].

1) SMT Model Variables: The developed SMT model for a quantum circuit with S substitutions, B blocks, and a dependency graph G = (V, A) with vertices V and edges A consists of the following variables:

C = {c1, ..., c|S|}: the set of chosen substitutions for the quantum circuit adaptation, i.e. the resulting quantum circuit adaptation only contains a substitution s if cs evaluates to true.
E = {e1, ..., e|B|}: the set of block starting times, i.e. the times at which the computation of each block is started on the target hardware modality.
D = {d1, ..., d|B|}: the set of block duration times.
F = {f1, ..., f|B|}: the set of block fidelities.

2) SMT Model Constraints: The assignment to the sets C, E, and D must be constrained to yield a valid and optimized quantum circuit adaptation. First, a substitution may only be chosen in a quantum circuit adaptation if it does not substitute the same gates as another chosen substitution:

¬cs ∨ ¬cs′   ∀ s, s′ ∈ S : ps ∩ ps′ ≠ ∅,   (1)

where ps and ps′ are the sets of quantum gates that will be substituted by substitutions s and s′, respectively.
The symbol ¬ refers to logic negation, while the symbol ∧ (∨) corresponds to logic conjunction (disjunction). In addition, the computation of a block in the quantum circuit must obey the dependencies defined in the graph G. Thus, the computation of a block b on the target quantum computer may only start once the computation of every preceding block b′ has concluded:

eb ≥ eb′ + db′   ∀ b, b′ ∈ B : ab′,b ∈ A,   (2)

where ab′,b is an edge in the block dependency graph G, eb′ is the time at which the computation of block b′ starts, and db′ is the duration of computing block b′. Finally, the block duration time and block fidelity must be set depending on the substitutions chosen in the quantum circuit adaptation. The block duration time db of a block b is set by

db := D(b) + Σ_{s∈S′} ∆D(s) ∧ cs,   (3)

where D(·) returns the duration of a block or quantum gate, and ∆D(·) gives the change in duration incurred by a substitution. The duration change is defined by

∆D(s) = Σ_{g∈gs} D(g) − Σ_{p∈ps} D(p),   (4)

where gs is the set of substitution quantum gates and ps is the set of substituted quantum gates of substitution s. Likewise, the fidelity fb of a block b is determined by

fb := log(F(b)) + Σ_{s∈S} ∆F(s) ∧ cs,   (5)

where F(·) returns the fidelity of a quantum gate, or of a block as given by the reference adaptation determined during the preprocessing steps, and ∆F(·) gives the change in fidelity incurred by a substitution. The fidelity change is defined by

∆F(s) = Σ_{g∈gs} log(F(g)) − Σ_{p∈ps} log(F(p)),   (6)

where gs are the substitution quantum gates and ps are the substituted quantum gates of substitution s. Note that the developed model does not contain the functions D(·) and log(F(·)) themselves. Instead, the function value of every substitution s, quantum gate g, and block b in the reference adaptation is computed before the SMT model is constructed. Furthermore, the developed model only registers one duration and one start time for a two-qubit block.
This introduces single-qubit gate ambiguities when minimizing the qubit idle time or the quantum circuit duration if the duration of the single-qubit gates on one qubit differs from that on the other qubit in a template or block.

3) Objective Functions: Lastly, we describe the objective functions investigated in this work. The objective function provided to the SMT solver is crucial for improving the quantum circuit adaptation, i.e. for improving the probability of computing the correct result on a noisy, near-term hardware modality. We investigated objective functions that improve the fidelity of the adapted quantum circuit, the qubit idle time of the adapted quantum circuit, and a combination of both. The qubit idle time has been observed to be a source of error [23] that should be minimized in a quantum circuit. We assume the state of a qubit to decay during idle time, i.e. the state of a qubit is unaffected by the idle time with probability

e^(−d/T),   (7)

where d is the duration during which a qubit is idle and T is the coherence time of the target hardware modality. The fidelity objective of the adapted quantum circuit is defined by

max Σ_b fb,   (8)

where fb is defined as in Eq. 5. The qubit idle time in the adapted quantum circuit is optimized by

max −(Q·D − Σ_b db)/T,   (9)

where Q is the number of qubits and D is the total circuit duration. We also combine these objectives as a product:

max Σ_b log(fb) − (Q·D − Σ_b db)/T.   (10)
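The model above maps directly onto the z3-solver Python API. The sketch below is ours for illustration only (the paper's implementation is not reproduced here); the instance data are toy values, with the block-1 duration changes mirroring the worked example of Eq. (11) later in this section, and it encodes constraints (1)-(3) and (5) together with the combined objective (10):

```python
# Illustrative Z3 sketch (not the paper's code): substitution choices c_s,
# block start times e_b, durations d_b and log-fidelities f_b, with the
# combined fidelity / idle-time objective of Eq. (10).
from z3 import Bool, Real, Optimize, Or, Not, If, Sum, sat

# Toy instance (assumed data; block-1 duration changes mirror Eq. (11)).
n_subs, n_blocks, n_qubits, T = 4, 3, 4, 2900.0
ref_dur = [965.0, 500.0, 700.0]                # D(b) in ns (blocks 2 and 3 invented)
ref_lf  = [-0.010, -0.006, -0.008]             # log(F(b)) of the reference adaptation (toy)
d_dur   = [-392.0, 238.0, -524.0, -476.0]      # duration change of each substitution
d_lf    = [-0.002, -0.001, -0.004, 0.001]      # log-fidelity change (toy)
subs_of = {0: [0, 1, 2, 3], 1: [], 2: []}      # substitutions applicable per block
conflicts = [(0, 1), (0, 2), (0, 3), (2, 3)]   # pairs substituting the same gates
edges = [(0, 1), (1, 2)]                       # block dependency graph A

c = [Bool(f"c_{s}") for s in range(n_subs)]
e = [Real(f"e_{b}") for b in range(n_blocks)]
d = [Real(f"d_{b}") for b in range(n_blocks)]
f = [Real(f"f_{b}") for b in range(n_blocks)]
opt = Optimize()

for s, t in conflicts:                         # Eq. (1): mutually exclusive choices
    opt.add(Or(Not(c[s]), Not(c[t])))

for b in range(n_blocks):                      # Eqs. (3) and (5)
    dur, lf = ref_dur[b], ref_lf[b]
    for s in subs_of[b]:
        dur = dur + If(c[s], d_dur[s], 0.0)
        lf = lf + If(c[s], d_lf[s], 0.0)
    opt.add(d[b] == dur, f[b] == lf, e[b] >= 0)

for b_prev, b_next in edges:                   # Eq. (2): block dependencies
    opt.add(e[b_next] >= e[b_prev] + d[b_prev])

D_total = Real("D_total")                      # total circuit duration
for b in range(n_blocks):
    opt.add(D_total >= e[b] + d[b])

# Combined objective (10): circuit log-fidelity minus scaled qubit idle time.
opt.maximize(Sum(f) - (n_qubits * D_total - Sum(d)) / T)

if opt.check() == sat:
    print([opt.model()[ci] for ci in c])       # chosen substitutions
```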
4) Determining the SMT Quantum Circuit Adaptation: After the SMT solver has computed an assignment to the SMT model variables, a substitution s is applied to the target quantum circuit if cs is set by the SMT solver. A substitution s is applied to a quantum circuit by substituting the quantum gates ps in the quantum circuit with gs. A quantum gate in the original quantum circuit that is not part of any chosen substitution is instead substituted by the basis translation performed in the preprocessing step.

Example: Adapting a Quantum Circuit from IBM Backends to Spin Qubits

In this section we describe the adaptation of a quantum circuit given in the basis of an IBM quantum computer based on superconducting qubits [19] to a quantum circuit suitable for computation on a spin-qubit quantum computer [6]. Figure 4 shows the quantum circuit, and Table I (D0) shows the characteristics of the quantum gates supported by the spin-qubit quantum computer used in this example [6]. The corresponding spin-qubit quantum computer supports arbitrary single-qubit gates in SU(2), a two-qubit controlled-Z (CZ) gate that is also used for KAK decompositions, two-qubit conditional rotation gates along an arbitrary axis, and two native realizations of swap gates (swapd and swapc). We do not consider the diabatic CZ gate in this example. The swap gate realization swapd requires less time than swapc but also has a lower gate fidelity than swapc. Depending on the structure of the quantum circuit, either swapd or swapc may be preferable in a quantum circuit adaptation, e.g. for reducing qubit idle time.
Figure 3: Substitution rules for adapting quantum circuits generated for IBM backends [19] to spin-based systems [24]: (a) (diabatic) conditional-Z, (b) conditional-rotation (CR), (c) direct swap gate, (d) composite swap gate, (e) KAK decomposition using CZ and single-qubit gates.

Figure 4: Quantum circuit adaptation for an example quantum circuit given in the IBM backend basis. Continuous lines indicate quantum gates substituted by the same substitution rule. An orange line corresponds to a KAK decomposition, a blue line to conditional rotation gates, and violet and black lines to different swap gate realizations.

The results of the quantum circuit adaptation steps are shown in Figure 4. First, the target quantum circuit is partitioned into blocks and the basis gate translation (see Figure 3a) is performed to determine a reference cost for each block. The substitution rules described in Figure 3 are evaluated on the target quantum circuit in the next step. This yields ten substitution matches, where the KAK decomposition (orange line) could be applied once to each block, the conditional-rotation (blue line) could be applied once each in block 1 and block 3, and swapd (violet line) as well as swapc (black line) could each be applied once in block 1 and block 2. The duration of block 1 is set in our example by

d1 = 965 + (573 − 965) ∧ c0 + (660 − 422) ∧ c1 + (19 − 543) ∧ c2 + (67 − 543) ∧ c3,   (11)

where 965 ns is the reference block duration given by the basis translation, and c0, c1, c2, c3 correspond to whether the KAK decomposition (c0), the conditional-rotation substitution (c1), the direct swap substitution swapd (c2), or the composite swap substitution swapc (c3) is applied. The characteristics of the other blocks are computed in an analogous way and input to the SMT model construction (see Section IV).
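Evaluating Eq. (11) for one substitution choice at a time gives the following block-1 durations; the snippet is ours and only repeats the arithmetic of the equation:

```python
# Illustrative check (not from the paper): Eq. (11) evaluated for one chosen
# substitution at a time on block 1 (reference duration 965 ns).
changes = {"c0 (KAK)": 573 - 965, "c1 (CROT)": 660 - 422,
           "c2 (swapd)": 19 - 543, "c3 (swapc)": 67 - 543}
for name, delta in changes.items():
    print(f"{name}: d1 = {965 + delta} ns ({delta:+d} ns)")
# c0: 573 ns (-392), c1: 1203 ns (+238), c2: 441 ns (-524), c3: 489 ns (-476)
```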
Depending on the chosen objective function, different substitutions may be applied during the quantum circuit adaptation. In this example we assume that the quantum circuit duration should be minimized. Using a KAK decomposition, the duration of block 1 would be reduced by 392 ns, the conditional-rotation quantum gate would increase the duration by 238 ns, swapd reduces the duration by 524 ns, and swapc reduces the duration by 476 ns. Substitutions s0, s2, and s3, as well as substitutions s0 and s1, are incompatible since they substitute the same set of quantum gates. Thus, applying the KAK substitution s0 reduces the duration of block 1 the most. The values and equations for the block durations and dependencies are entered as an SMT model into an SMT solver whose result informs the quantum circuit adaptation.

V. RESULTS

In this section, we evaluate the developed SMT model on the introduced semiconducting spin hardware modality. We investigated the increase in circuit and Hellinger fidelity and the decrease in qubit idle time for quantum volume circuits [13] and random circuits containing gates from the templates in Fig. 3, with up to 4 qubits and a quantum circuit depth of up to 160. The two gate characteristics D0 and D1 given in Table I were evaluated. The developed SMT model is compared to employing a KAK decomposition using CZ and diabatic CZ gates, template optimization with two objectives targeting the quantum circuit fidelity and the qubit idle time, and a direct basis translation that replaces each non-supported two-qubit quantum gate in the quantum circuit with a CZ gate. The SMT solver was invoked with the fidelity objective SAT F given in Eq. 8, the idle time objective SAT R given in Eq. 9, and the combined objective SAT P given in Eq. 10.
The quantum circuit determined by direct basis translation is chosen as a baseline for comparison in the following results. Before employing the quantum circuit adaptation technique, Qiskit [21] was used to transpile the target quantum circuit into one compliant with the hardware topology.

A. Circuit Fidelity Increase and Qubit Idle Time Decrease

In this section, we evaluated the impact of the quantum circuit adaptation on the decrease in qubit idle time and on the change in quantum circuit fidelity as given by the product of the individual gate fidelities. The fidelity and idle time of the quantum circuit determined by direct basis translation are chosen as the baseline for comparison in the following results. As depicted in Fig. 5, the SMT approach yields the largest improvement in quantum circuit fidelity, of up to 15%, over all quantum circuits. Performing quantum circuit adaptation using only KAK decompositions based on (diabatic) CZ gates decreases the overall quantum circuit fidelity, since the KAK decomposition may introduce additional single-qubit gates compared to template optimization. In addition, the diabatic CZ gate has a lower gate fidelity than the baseline basis translation using CZ gates (see Table I). In Figure 6, the decrease in qubit idle time of the respective quantum circuits is depicted for the studied quantum circuit adaptation techniques. The SMT-based approaches yield the highest decrease in qubit idle time for all but the smallest quantum circuit.
B. Hellinger Fidelity and Qubit Idle Time

Here, we investigate the impact of the developed approach on the qubit idle time and on the Hellinger fidelity obtained by performing quantum circuit simulations subject to errors incurred by a depolarization channel corresponding to the individual gate fidelities and by thermal relaxation corresponding to the qubit idle time [21]. In accordance with [6], we assumed T2 = 2900 ns and, for the thermal relaxation errors, a T1 time that is three orders of magnitude larger. Figure 5 shows the decrease in qubit idle time on the y-axis and the change in Hellinger fidelity on the x-axis. The developed SMT approaches yield adapted quantum circuits with the highest decrease in qubit idle time and the largest increase in Hellinger fidelity. The quantum circuit adaptation techniques based on the KAK decomposition and template optimization occasionally yield good results, but lead to worse results than the developed SMT approaches in most cases.
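A minimal sketch of such a noise model with Qiskit Aer (ours, for illustration; it assumes qiskit and qiskit-aer are installed, and setting the depolarizing parameter from the gate infidelity is a common simplification rather than the exact calibration used in the paper):

```python
# Illustrative sketch (not the paper's code): depolarizing error per gate plus
# thermal relaxation during idle time, and a Hellinger fidelity between counts.
from qiskit_aer.noise import NoiseModel, depolarizing_error, thermal_relaxation_error
from qiskit.quantum_info import hellinger_fidelity

T2_ns = 2900.0
T1_ns = 1000.0 * T2_ns     # three orders of magnitude larger, as assumed in the text

noise = NoiseModel()
noise.add_all_qubit_quantum_error(depolarizing_error(1 - 0.999, 2), ["cz"])
noise.add_all_qubit_quantum_error(thermal_relaxation_error(T1_ns, T2_ns, 152.0), ["id"])

# Hellinger fidelity between an ideal and a noisy measurement distribution.
print(hellinger_fidelity({"00": 512, "11": 512}, {"00": 500, "11": 480, "01": 44}))
```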
+ACKNOWLEDGMENT
+This work arose as a project from the Qiskit Advocate Mentorship Program in the Fall of 2021. The authors acknowledge the use of IBM Quantum Services for this work. This work was partially funded by the Carl Zeiss foundation.
+REFERENCES
+[1] J.-L. Brylinski and R. Brylinski, “Universal quantum gates,” 2001.
+[2] M. J. Bremner, C. M. Dawson, J. L. Dodd, A. Gilchrist, A. W. Harrow, et al., “Practical scheme for quantum computation with any two-qubit entangling gate,” Phys. Rev. Lett., vol. 89, no. 24, p. 247902, 2002.
+[3] R. Hanson, L. P. Kouwenhoven, J. R. Petta, S. Tarucha, and L. M. K. Vandersypen, “Spins in few-electron quantum dots,” Rev. Mod. Phys., vol. 79, no. 4, pp. 1217–1265, 2007.
+[4] D. Loss and D. P. DiVincenzo, “Quantum computation with quantum dots,” Physical Review A, vol. 57, no. 1, p. 120, 1998.
+[5] D. M. Zajac, A. J. Sigillito, M. Russ, F. Borjans, J. M. Taylor, et al., “Resonantly driven CNOT gate for electron spins,” Science, vol. 359, no. 6374, pp. 439–442, 2018.
+[6] L. Petit, M. Russ, G. H. Eenink, W. I. Lawrie, J. S. Clarke, et al., “Design and integration of single-qubit rotations and two-qubit gates in silicon above one kelvin,” Communications Materials, vol. 3, no. 1, pp. 1–7, 2022.
+[7] C. Zhang, T. Chen, S. Li, X. Wang, and Z.-Y. Xue, “High-fidelity geometric gate for silicon-based spin qubits,” Phys. Rev. A, vol. 101, no. 5, p. 052302, 2020.
+[8] X. Wang, L. S. Bishop, E. Barnes, J. P. Kestner, and S. D. Sarma, “Robust quantum gates for singlet-triplet spin qubits using composite pulses,” Phys. Rev. A, vol. 89, no. 2, p. 022310, 2014.
+[9] N. Earnest, C. Tornow, and D. J. Egger, “Pulse-efficient circuit transpilation for quantum applications on cross-resonance-based hardware,” Phys. Rev. Res., vol. 3, no. 4, p. 043088, 2021.
+[10] N. Abdessaied, M. Soeken, R. Wille, and R. Drechsler, “Exact template matching using boolean satisfiability,” in IEEE 43rd International Symposium on Multiple-Valued Logic, 2013, pp. 328–333.
+[11] D. M. Miller, D. Maslov, and G. W. Dueck, “A transformation based algorithm for reversible logic synthesis,” in Proceedings Design Automation Conference, 2003, pp. 318–323.
+[12] M. M. Rahman, G. W. Dueck, and J. D. Horton, “An algorithm for quantum template matching,” ACM Journal on Emerging Technologies in Computing Systems, vol. 11, no. 3, pp. 1–20, 2014.
+[13] A. W. Cross, L. S. Bishop, S. Sheldon, P. D. Nation, and J. M. Gambetta, “Validating quantum computers using randomized model circuits,” Physical Review A, vol. 100, no. 3, p. 032328, 2019.
+[14] M. Möttönen, J. J. Vartiainen, V. Bergholm, and M. M. Salomaa, “Quantum circuits for general multiqubit gates,” Physical Review Letters, vol. 93, no. 13, p. 130502, 2004.
+[15] V. V. Shende, S. S. Bullock, and I. L. Markov, “Synthesis of quantum-logic circuits,” IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, vol. 25, no. 6, pp. 1000–1010, 2006.
+[16] Y. Nakajima, Y. Kawano, and H. Sekigawa, “A new algorithm for producing quantum circuits using KAK decompositions,” Quantum Info. Comput., vol. 6, no. 1, pp. 67–80, 2006.
+[17] R. Iten, R. Moyard, T. Metger, D. Sutter, and S. Woerner, “Exact and practical pattern matching for quantum circuit optimization,” ACM Transactions on Quantum Computing, vol. 3, no. 1, 2022.
+[18] L. d. Moura and N. Bjørner, “Z3: An efficient SMT solver,” in International Conference on Tools and Algorithms for the Construction and Analysis of Systems, Springer, 2008, pp. 337–340.
+[19] IBM Quantum, https://quantum-computing.ibm.com/, 2021.
+[20] J. Yoneda, K. Takeda, T. Otsuka, T. Nakajima, M. R. Delbecq, et al., “A quantum-dot spin qubit with coherence limited by charge noise and fidelity higher than 99.9%,” Nature Nanotechnology, vol. 13, no. 2, pp. 102–106, 2018.
+[21] Qiskit: An open-source framework for quantum computing, www.qiskit.org, 2021.
+[22] J. Pointing, O. Padon, Z. Jia, H. Ma, A. Hirth, et al., “Quanto: Optimizing quantum circuits with automatic generation of circuit identities,” 2021.
+[23] P. Jurcevic, A. Javadi-Abhari, L. S. Bishop, I. Lauer, D. F. Bogorin, et al., “Demonstration of quantum volume 64 on a superconducting quantum computing system,” Quantum Science and Technology, vol. 6, no. 2, p. 025020, 2021.
+[24] L. Petit, H. Eenink, M. Russ, W. Lawrie, N. Hendrickx, et al., “Universal quantum logic in hot silicon qubits,” Nature, vol. 580, no. 7803, pp. 355–359, 2020.
+[Figure legend labels for Figs. 5–7: DoD 1, SAT P, SAT F, SAT R, KAK db-CZ, KAK CZ, Template Opt. R, Template Opt. F]
diff --git a/p9E4T4oBgHgl3EQfvg1C/content/tmp_files/2301.05242v1.pdf.txt b/p9E4T4oBgHgl3EQfvg1C/content/tmp_files/2301.05242v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..991c1a5e7c41d3892577b0afdf43c5848c358ecf
--- /dev/null
+++ b/p9E4T4oBgHgl3EQfvg1C/content/tmp_files/2301.05242v1.pdf.txt
@@ -0,0 +1,2565 @@
+MNRAS 000, 1–15 (2023)
+Preprint 16 January 2023
+Compiled using MNRAS LATEX style file v3.0
+The many reasons that the rotation curves of low-mass galaxies can fail as tracers of their matter distributions
+Eleanor R. Downing,1,2★ Kyle A. Oman1,2†
+1Institute for Computational Cosmology, Durham University, South Road, Durham, DH1 3LE, United Kingdom
+2Department of Physics, Durham University, South Road, Durham, DH1 3LE, United Kingdom
+Accepted XXX. Received YYY; in original form ZZZ
+ABSTRACT
+It is routinely assumed that galaxy rotation curves are equal to their circular velocity curves (modulo some corrections) such that they are good dynamical mass tracers. We take a visualisation-driven approach to exploring the limits of the validity of this assumption for a sample of 33 low-mass galaxies (60 < 𝑣max/km s−1 < 120) from the APOSTLE suite of cosmological hydrodynamical simulations. Only 3 of these have rotation curves nearly equal to their circular velocity curves at 𝑧 = 0, the rest are undergoing a wide variety of dynamical perturbations. We use our visualisations to guide an assessment of how many galaxies are likely to be strongly perturbed by processes in several categories: mergers/interactions (affecting 6/33 galaxies), bulk radial gas inflows (19/33), vertical gas outflows (15/33), distortions driven by a non-spherical DM halo (17/33), warps (8/33), and winds due to motion through the IGM (5/33). Most galaxies fall into more than one of these categories; only 5/33 are not in any of them.
The sum of these effects leads to an underestimation of the low-velocity slope of the baryonic Tully-Fisher relation (𝛼 ∼ 3.1 instead of 𝛼 ∼ 3.9, where 𝑀bar ∝ 𝑣^𝛼) that is difficult to avoid, and could plausibly be the source of a significant portion of the observed diversity in low-mass galaxy rotation curve shapes.
+Key words: galaxies: kinematics and dynamics – galaxies: dwarf – dark matter
+★ E-mail: eleanor.r.downing@durham.ac.uk
+† E-mail: kyle.a.oman@durham.ac.uk
+1 INTRODUCTION
+Since the discovery of flat rotation curves in galaxies (Rubin et al. 1980; Bosma 1981) leading to the widespread acceptance of dark matter (DM) theories, rotation curves have been used to study DM. Low-mass galaxies, with maximum circular velocities ≲ 120 km s−1, are particularly well suited for such analysis because their high DM mass fractions reduce the relative gravitational influence of baryons, so that their circular velocity almost directly traces their DM content. The baryonic Tully-Fisher relation (BTFR; McGaugh et al. 2000) provides a concise summary of this trend: the baryonic (gas plus stellar) mass of galaxies is observed to be proportional to about the fourth power of their maximum rotation velocities, 𝑀bar ∝ 𝑣max^4 (but see Ponomareva et al. 2018), but a constant baryon-to-DM mass ratio would instead imply a shallower slope close to 𝑀bar ∝ 𝑣max^3 (e.g. Sales et al. 2017). The slope and scatter of the BTFR for the lowest mass galaxies (𝑀bar ≲ 10^9 M⊙), however, remain challenging to constrain (Sorce & Guo 2016; Papastergis et al. 2016; Bradford et al. 2016; Verbeke et al. 2017; Ponomareva et al. 2018; Mancera Piña et al. 2019; Lelli et al. 2019; Wingfield McQuinn et al. 2022; Ball et al. 2022, and Lelli 2022, a review), and leaves the connection between the luminous components of galaxies and the DM haloes in which they form at the low-mass edge of galaxy formation uncertain (Trujillo-Gomez et al. 2011; Desmond 2012; Papastergis et al. 2015; Brook et al. 2016; Oman et al. 2016; Brooks et al. 2017; Sales et al. 2017).
+Studies of dwarf galaxies have revealed several potential problems in near-field cosmology (see Bullock & Boylan-Kolchin 2017; Sales et al. 2022, for reviews). One such problem that remains unresolved is the ‘cusp-core’ problem (Flores & Primack 1994; Moore 1994; de Blok 2010); the inner slopes of low-mass galaxy rotation curves are often slowly rising compared to the mass profile implied by the steep central density ‘cusps’ predicted by N-body simulations (Navarro et al. 1996b).
+There have been many proposed resolutions of the cusp-core problem within the ΛCDM framework. One such proposal is that gas flows driven by supernova feedback couple gravitationally to the DM and re-distribute it, producing and maintaining a central density ‘core’ (e.g., Navarro et al. 1996a; Read & Gilmore 2005; Pontzen & Governato 2012, and see Pontzen & Governato 2014, for a review). The ‘bursty’ star formation histories arising in some galaxy formation simulations produce cores in a limited mass range (e.g., Di Cintio et al. 2014; Chan et al. 2015; Tollet et al. 2016; Jahn et al. 2021), and the conditions necessary for core formation via this mechanism are now well-understood (Bose et al. 2019; Benítez-Llambay et al. 2019). However, whether such effects can fully reproduce the diverse rotation curves observed for dwarf galaxies remains unclear (e.g. Oman et al. 2015; Santos-Santos et al. 2020; Roper et al. 2022).
+Another proposed scenario involves allowing cold DM particles to scatter from each other, leading to heat transfer to the inner regions of DM haloes and redistributing the DM to produce a core (Spergel & Steinhardt 2000). Such ‘self-interacting dark matter’ (SIDM) models inherit the large scale successes of the standard ΛCDM model, and are able to produce a range of rotation curve shapes by including the gravitational influence of baryons, which can re-form a cusp (see Tulin & Yu 2018, for a recent review). This shows promise (e.g., Ren et al. 2019; Kaplinghat et al. 2020) however, again, concerns whether SIDM can account for the full observed diversity remain (Creasey et al. 2017; Santos-Santos et al. 2020).
+More prosaically, the problem could be that the circular velocity curves of low-mass galaxies are not accurately measured by the methods used to extract them from observations. The inclination angle (possibly varying with radius), non-circular motions, potentially anisotropic velocity dispersion, and geometrically thick and/or flared nature of gas discs are just some of the challenging issues that models in principle need to account for to accurately measure a rotation curve. Strong degeneracies between parameters describing the geometry and kinematics of a gas disc further complicate matters. Attempts to model realistic galaxies with known rotation curves have revealed that the errors due to these issues can be quite severe (Read et al. 2016; Oman et al. 2019; Roper et al. 2022), although Frosst et al. (2022) argue that such effects may still fall well short of explaining the observed diversity in rotation curve shapes. There is, however, an even more worrying possibility: that the rotation curves of low-mass galaxies may in some cases not faithfully trace their circular velocity curves1. In this case even a perfectly accurate measurement of the rotation curve gives no meaningful information about the total matter distribution within a galaxy.
+It is clear that some low-mass galaxies are not in dynamical equilibrium, and thus that their rotation curves are not reliable tracers of their circular velocity curves (and consequently of their DM content). Obvious perturbations, such as mergers or star formation-driven ‘superbubbles’, are easily identified, however low-mass galaxies’ shallow gravitational potential wells make them especially susceptible to additional perturbations which may not be so obvious. How often these more subtle physical processes may cause departures from equilibrium in these objects remains almost unexplored in the literature (see Hayashi & Navarro 2006 on the effect of a triaxial DM halo; Valenzuela et al. 2007 on the influence of lopsided gas discs; Read et al. 2016 on the influence of the star formation cycle; Verbeke et al. 2017 sec. 4.1 for a brief exploration of the topic).
+In this work we make an initial assessment of the relative importance of different types of perturbations using a sample of galaxies with maximum circular velocities 60 < 𝑣max/km s−1 < 120 from the APOSTLE suite of cosmological hydrodynamical simulations. We create visualisations of the galaxies and compute their rotation and circular velocity curves at a range of times over the past ∼ 4 Gyr.
+We use these to investigate the kind of perturbations that affect low-mass galaxies, their frequencies, their effects on the galaxies’ rotation curves, and what conditions are necessary for galaxies to actually rotate at their circular speeds.
+We begin in Section 2 with a brief description of the APOSTLE simulations and our methods for calculating rotation curves and producing visualisations. In Section 3, we present our main results: we describe the perturbations affecting galaxies in our sample, and investigate their influence on key galaxy scaling relations. We summarize our conclusions and discuss their implications and applicability to real galaxies in Section 4.
+1 Throughout this work, we use ‘circular velocity curve’ to refer to the speed of a particle on a circular orbit computed for a given density field, and ‘rotation curve’ to refer to the orbital speed of gas.
+2 METHODS
+2.1 The APOSTLE simulations
+The APOSTLE2 simulations (Sawala et al. 2016; Fattahi et al. 2016) are a suite of zoom-in cosmological hydrodynamical galaxy formation simulations. The suite is made up of 12 regions selected to resemble the Local Group of galaxies in terms of the masses, separation and kinematics of a pair of galaxies analogous to the Milky Way and Andromeda, and a lack of other massive galaxies within a few megaparsecs. A region about 2 − 3 Mpc in radius around each pair was simulated at multiple resolution levels (lowest ‘L3’ to highest ‘L1’) with the ‘Reference’ calibration (Crain et al. 2015) of the EAGLE galaxy formation model (Schaye et al. 2015). The model is implemented using a smoothed-particle hydrodynamics framework in the pressure-entropy formulation (Hopkins 2013) and includes prescriptions for radiative cooling (Wiersma et al. 2009a), star formation (Schaye 2004; Schaye & Dalla Vecchia 2008), stellar and chemical enrichment (Wiersma et al. 2009b), thermal-mode stellar feedback (Dalla Vecchia & Schaye 2012) and cosmic reionisation (Haardt & Madau 2001; Wiersma et al. 2009b). The feedback from supermassive black hole accretion implemented in the EAGLE model has a negligible effect on the galaxies in the APOSTLE simulations (Sawala et al. 2016). The simulations assume the WMAP-7 cosmological parameters (Komatsu et al. 2011).
+Galaxies are identified in the simulations following a two-step process. First, particles are linked together by a friends-of-friends (FoF) algorithm (Davis et al. 1985). Each FoF group is independently analysed using the Subfind halo finding algorithm (Springel et al. 2001; Dolag et al. 2009) which identifies gravitationally bound substructures. The subhalo with the minimum gravitational potential in each FoF group is labelled the ‘central’ galaxy of the group, while others are labelled ‘satellites’. We label galaxies from the APOSTLE simulations following the same convention as Oman et al. (2019): for example, AP-L1-V6-5-0 refers to APOSTLE resolution level L1, region (volume) V6, FoF group 5, subhalo 0 (the ‘central’ subhalo). We always refer to the identifier of the galaxy in the last snapshot; its progenitor(s) may have different identifiers. We track the progenitors of galaxies in our sample back through time using the merger tree algorithm of Helly et al. (2003). When a galaxy has more than one progenitor at a previous time, we follow the progenitor that contributed the most particles to the descendant.
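To make the last step above concrete, the sketch below illustrates the rule of following the progenitor that contributed the most particles to the descendant. It is an editorial example, not code from the paper or from the Helly et al. (2003) merger-tree implementation; the particle-ID sets and subhalo labels are hypothetical.

```python
def main_progenitor(descendant_ids, progenitor_ids):
    """Pick the progenitor contributing the most particles to the descendant.

    descendant_ids : set of particle IDs bound to the descendant subhalo.
    progenitor_ids : dict mapping progenitor subhalo label -> set of its
                     particle IDs at the earlier output.
    Returns the label of the progenitor with the largest overlap,
    or None if no candidate shares any particles.
    """
    best_label, best_overlap = None, 0
    for label, ids in progenitor_ids.items():
        overlap = len(descendant_ids & ids)
        if overlap > best_overlap:
            best_label, best_overlap = label, overlap
    return best_label

# Hypothetical example with three candidate progenitors:
descendant = set(range(0, 1000))
candidates = {'prog_a': set(range(0, 700)),
              'prog_b': set(range(650, 900)),
              'prog_c': set(range(900, 950))}
print(main_progenitor(descendant, candidates))  # -> 'prog_a'
```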
+In this work we focus on a sample drawn exclusively from the highest-resolution (L1) simulations in the suite. Only regions V1, V4, V6, V10 and V11 have been simulated at this resolution. At L1 resolution, the gas (dark matter) particle mass is typically 7.4 × 10^3 M⊙ (3.6 × 10^4 M⊙), and the maximum gravitational softening length is ≈ 134 pc. According to the criterion of Power et al. (2003), the circular velocity curves of low-mass galaxies at this resolution level are numerically converged to better than 10 per cent at radii ≳ 700 pc.
+We focus on recent times, between 8.9 Gyr and 13.76 Gyr (𝑧 = 0). In this period there are 17 full simulation outputs (every ∼ 0.3 Gyr), or ‘snapshots’, and 147 partial outputs (every 34 Myr), or ‘snipshots’, where some detail – such as abundances of individual elements – is omitted.
+Our sample of galaxies is the same as that used by Oman et al. (2019). The galaxies are selected to have maximum circular velocities 60 < 𝑣max/km s−1 < 120, to be centrals (not satellites), and to be found in FoF groups which do not include any contaminating low-resolution particles from outside the nominal zoom-in regions of the simulations. There are 33 such galaxies, with 4 found in simulation region V1, 5 in V4, 11 in V6, 10 in V10, and 3 in V11. All are separated from the nearest of the pair of galaxies analogous to the Milky Way and M 31 by at least 450 kpc, and up to 4 Mpc (footnote 3).
+2 A Project Of Simulating The Local Environment
+[Figure 1: 𝑣circ or 𝑣rot [km s−1] against 𝑅 [kpc], with curves colour-coded by 𝑡 [Gyr].]
+Figure 1. The rotation curves of the galaxy AP-L1-V6-5-0 at times between 8.88 Gyr and 13.76 Gyr. The circular velocity curve increases gradually over time within the purple band. The extracted rotation curves are much more variable and are plotted with coloured curves, with yellow for earlier and red for later times. The largest fluctuation in the rotation curves coincides with the time of a merger with a gas-rich companion.
+2.2 Circular velocity and rotation curves
+We calculated the total circular velocity curves of galaxies in our sample as 𝑣circ = √(𝐺𝑀(< 𝑟)/𝑟), where 𝐺 is the gravitational constant and 𝑀(< 𝑟) is the mass enclosed within radius 𝑟 of the location of the particle with the minimum gravitational potential, including all particle types (DM, gas, stars, and black holes). The spherically symmetric approximation is reasonable for our sample of galaxies, which are invariably DM-dominated both globally and locally at all radii. Furthermore, as will be seen below, the actual rotation curves preferentially underestimate the (spherically averaged) circular velocity curves, so the reduction in 𝑣circ by a few per cent (Binney & Tremaine 2008, sec. 2.6.1b) due to this approximation tends to slightly underestimate differences between the two.
+Before calculating rotation curves, we set the velocity zero point of each galaxy to the mean velocity of its 100 innermost ‘atomic’ gas particles. We define atomic gas particles as those with H i mass fractions of greater than 0.5. The H i mass fractions are calculated as detailed in Oman et al. (2019) – in brief, these assume the empirical prescription of Rahmati et al. (2013) to compute the neutral fractions of particles, and the relation given in Blitz & Rosolowsky (2006) to partition atomic from molecular gas.
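The circular velocity definition above reduces to a cumulative mass sum over particles. The following sketch is an editorial illustration, not the APOSTLE analysis code; the array names, units and the value of G in convenient units are assumptions made for the example.

```python
import numpy as np

G = 4.301e-6  # gravitational constant in kpc (km/s)^2 Msun^-1

def circular_velocity_curve(pos, mass, centre, radii):
    """Spherically averaged circular velocity, v_circ(r) = sqrt(G M(<r) / r).

    pos    : (N, 3) particle positions in kpc (all particle types combined)
    mass   : (N,) particle masses in Msun
    centre : (3,) position of the minimum-potential particle in kpc
    radii  : (M,) radii in kpc at which to evaluate the curve
    Returns v_circ in km/s at each requested radius.
    """
    r = np.linalg.norm(np.asarray(pos) - np.asarray(centre), axis=1)
    mass = np.asarray(mass)
    m_enclosed = np.array([mass[r < R].sum() for R in radii])
    return np.sqrt(G * m_enclosed / np.asarray(radii))

# Hypothetical usage with arrays loaded from a snapshot:
# radii = np.arange(0.5, 30.0, 0.5)
# vcirc = circular_velocity_curve(all_pos, all_mass, centre, radii)
```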
We then calculate the angular momentum vector of the atomic gas disc by summing the angular momenta of the innermost 50 per cent of atomic gas particles (or 125,000, whichever is fewer). We rotate the coordinate frame so that the angular momentum vector points in the 𝑧-direction, placing the disc in the 𝑥-𝑦 plane. We measure the median azimuthal velocity of atomic gas particles gravitationally bound to the galaxy within cylindrical annuli of 0.5 kpc width. This bin width offers a good compromise between limiting noise in the measurement and resolving the structure in the rotation curves. We measure the rotation curves out to the edge of the atomic gas disc, which we define as the radius enclosing 90 per cent of the H i mass.
+3 The ‘zoom-in’ region has an irregular shape and can extend beyond the nominal radius of 2−3 Mpc. The condition that no low-resolution particles are present in the FoF group ensures that the galaxies in our sample are sufficiently far from the boundary of the ‘zoom-in’ region to avoid any spurious numerical effects.
+The rotation curves are not corrected for a possible radial pressure gradient in the gas disc (often incorrectly termed an ‘asymmetric drift correction’, see e.g. Valenzuela et al. 2007, appendix A). Such corrections for our sample of galaxies (at 𝑧 = 0) were computed by Oman et al. (2019) and are invariably small (≲ 10 per cent), except for during mergers. Since we focus below on links between visible (Sec. 2.3) gas kinematic features and rotation curve features we omit further discussion of pressure-support corrections for simplicity, but note that we do not expect that accounting for these would qualitatively change any of our conclusions.
+This process was repeated for the 17 snapshots between 8.88 Gyr and 13.76 Gyr to produce a set of circular velocity and rotation curves over time, for each of the 33 galaxies in our sample. Fig. 1 shows the resulting curves at each snapshot for the galaxy AP-L1-V6-5-0, as an illustrative example.
+2.3 Images and videos
+We use the py-sphviewer (Benitez-Llambay 2015) toolkit to create videos of galaxies in our sample over time to explore the kinds of perturbations that affect them and their effects on their rotation curves.
+In py-sphviewer, the ‘observer’ is referred to as the ‘camera’. The parameters specifying the camera position and orientation are ‘anchored’ at the times corresponding to snapshots. The camera is pointed at the centre of the galaxy of interest (defined as the location of the particle with the minimum gravitational potential), and placed at a distance such that an image with a 90◦ field of view extends to about twice the radius of the atomic gas disc. We track both a ‘face-on’ view camera offset from the centre along the angular momentum vector of the disc (see Sec. 2.2), and an ‘edge-on’ view camera offset along an arbitrarily chosen orthogonal axis. Each galaxy is visualised at 383 times evenly spaced between 8.88 and 13.76 Gyr. Since the time when a visualisation is to be created does not in general correspond to the time of a snapshot or snipshot, particle positions are linearly interpolated between the two simulation outputs closest to the desired time. The parameters describing the camera position and orientation are also linearly interpolated to the desired time. The close spacing of the snipshots in time means that a higher-order interpolation scheme is not necessary.
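The interpolation between outputs described above can be sketched as a single linear blend applied to both the particle positions and the scalar camera parameters, assuming particles have been matched by ID across the two bracketing outputs. This is an editorial sketch; the py-sphviewer camera interface itself is not reproduced here and the variable names are assumptions.

```python
import numpy as np

def lerp(a, b, frac):
    """Linear interpolation between two values or arrays, 0 <= frac <= 1."""
    return (1.0 - frac) * np.asarray(a) + frac * np.asarray(b)

def interpolate_frame(t, t0, t1, pos0, pos1, cam0, cam1):
    """Interpolate particle positions and camera parameters to time t.

    t0, t1     : times of the two simulation outputs bracketing t (t0 <= t <= t1)
    pos0, pos1 : (N, 3) positions of the same particles (matched by ID) at t0, t1
    cam0, cam1 : dicts of scalar camera parameters anchored at t0 and t1,
                 e.g. {'x': ..., 'y': ..., 'z': ..., 'r': ...}
    """
    frac = (t - t0) / (t1 - t0)
    pos = lerp(pos0, pos1, frac)
    cam = {key: float(lerp(cam0[key], cam1[key], frac)) for key in cam0}
    return pos, cam
```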
+Finally, the normalisation of the colour scale of the images is linearly interpolated between the maximum pixel values in the first and last image (and likewise for the minima) in each series to prevent ‘flickering’ and over/under-saturation of the images. To focus attention on the object of interest, the contributions of simulation particles more than 50 kpc from the centre of the object of interest are exponentially suppressed with a scale length of 50 kpc, such that anything beyond ∼ 300 kpc is essentially invisible.
+We use this procedure to create videos visualising the galaxy face-on and edge-on for DM and gas particle types, and assemble these in a variety of combinations (e.g. face-on and edge-on with composite DM plus gas images; face-on showing DM and gas particles side by side, edge-on showing DM and gas particles side by side) to create an information-rich set of videos for each galaxy. We also produce a set of figures for each galaxy showing its circular velocity and rotation curve at the time of each snapshot side-by-side with an image of the galaxy at the same time. Further details are given in the Appendix. Fig. 2 shows a few example frames from a DM-plus-gas composite face-on view video (on compatible software the video itself will be shown before the figure is displayed) for the galaxy AP-L1-V6-5-0 (the same galaxy as in Fig. 1), showing a gas-rich merger.
+Figure 2. Selected frames from the face-on video of the galaxy AP-L1-V6-5-0, showing a gas-rich merger which strongly disrupts the gas disc and the rotation curve around 𝑡 ≈ 10.5 ± 0.5 Gyr (shown in Figure 1). The partially stripped, but still gas-rich, secondary halo has a second approach, once again disturbing the galaxy, around 𝑡 ≈ 11.7 ± 0.1 Gyr. The first two images show the DM density only (grey-scale) and gas density only (purple-orange colour map) respectively, for the same time as shown in the third panel. The further images are composites of DM density and gas density. On compatible pdf viewer software a video will play before the figure is displayed. It shows the evolution of the galaxy over ∼ 4 Gyr with side-by-side face-on (left) and edge-on (right) views of the galaxy showing the DM and gas density composite visualisation. The same video is available in the supplementary materials as AP-L1-V6-5-0-composite-edge-and-face.mp4 (see Appendix A).
+3 RESULTS
+We examined the videos and rotation curves for each galaxy in detail. We noted the characteristics of each galaxy, the types of perturbations visibly affecting each, and their effects on the galaxy and its rotation curve over time. We allowed the qualitative impressions formed during this process to guide the creation of a quantitative summary of the different types of perturbations affecting the galaxies. Our thoughts regarding the advantages of this approach are summarised in Sec. 4.2 below.
+3.1 Quality of the rotation curve as a circular velocity tracer
+In Fig. 3, we show the rotation curves at the times of the last 3 snapshots (13.10, 13.43, 13.76 Gyr) of each galaxy, as well as their circular velocity curves at the time of the last snapshot. The circular velocity curve (purple line) at the times of the two preceding snapshots is invariably very similar to that at the time of the last snapshot, so we omit them from the figure. Some rotation curves accurately trace the circular velocity curve, while others do not.
+Likewise, some galaxies have rotation curves that are highly variable over the ∼ 660 Myr spanned by the three snapshots, while others are quite stable. Guided by our visual impression of the curves in Fig. 3, we devised a summary statistic 𝑄 that captures these features. It is defined:
+𝑄 = (1/7) (4𝑞0 + 2𝑞0,1 + 𝑞1,2), (1)
+where:
+𝑞0 = 𝑃0.75(|𝑣rot,0(𝑅𝑖)/𝑣circ,0(𝑅𝑖) − 1|), (2)
+𝑞0,1 = 𝑃0.75(|𝑣rot,0(𝑅𝑖)/𝑣rot,1(𝑅𝑖) − 1|), (3)
+𝑞1,2 = 𝑃0.75(|𝑣rot,1(𝑅𝑖)/𝑣rot,2(𝑅𝑖) − 1|). (4)
+𝑣circ,0 is the circular velocity curve at the time of the last snapshot, 𝑣rot,0, 𝑣rot,1 and 𝑣rot,2 are the rotation curves at times of the last, second-last and third-last snapshots, respectively, 𝑅𝑖 are the radii where the curves are sampled, and 𝑃0.75(·) denotes the 75th percentile. The radii 𝑅𝑖 are evenly spaced every 500 pc out to the radius enclosing 90 per cent of the H i mass of the galaxy – since this varies with time, pairs of curves are compared using sampling points common to the pair. Conceptually, 𝑞0 measures how well the rotation curve traces the circular velocity curve (at the time of the final snapshot), while 𝑞0,1 and 𝑞1,2 measure the time variability of the rotation curve. In all cases, smaller values indicate better agreement. The 75th percentile is used to enforce that ‘agreement’ between two curves must extend over most of the curves (3/4 of their extent) to obtain a correspondingly small value. The three 𝑞 values are combined as a weighted sum to give 𝑄, with slightly more weight placed on the agreement between the rotation curve and circular velocity curve than its time variability.
+The panels of Fig. 3 are arranged in order of increasing 𝑄. It is visually clear that the rotation curves of galaxies with higher 𝑄 do not trace the circular velocity curve as closely as those of galaxies with lower 𝑄, and are likewise more time-variable. We divide galaxies into 4 classes based on the 𝑄 statistic. Of the 33 galaxies in our sample, 3 are labelled ‘class 1’ (‘excellent’ agreement between circular velocity
+Table 1. Summary of perturbations affecting the gas kinematics in our sample of galaxies. The rows are in order of increasing 𝑄 parameter (column 3) (Sec. 3.1) quantifying how closely the rotation curve traces the circular velocity curve; higher 𝑄 indicates poorer agreement. The range in 𝑄 is separated into 4 classes (column 1) from class 1, ‘excellent agreement’, to class 4, ‘poor agreement’. The remaining columns provide quantitative estimates of the strength of various perturbations with entries corresponding to a nominal ‘strong perturbation’ regime shown in bold. Further details are given in the specified sections. Column (4): Time(s) since the big bang of the first pericentric passage of companions with DM mass ratio greater than 1:20. Currently strongly interacting companions are marked ‡, and the entire entry is shown in bold (Sec. 3.2.1). (5): Peak (most negative) bulk cylindrical radial atomic gas inflow rate during the last ∼ 600 Myr, values < −5 kpc Gyr−1 in bold (Sec. 3.2.2). (6): Peak bulk vertical (sgn(𝑧)𝑣𝑧) atomic gas expansion rate during the last ∼ 600 Myr, values > 1 kpc Gyr−1 in bold (Sec. 3.2.2). (7): DM halo major-to-intermediate axis ratio 𝑏/𝑎 at 𝑧 = 0 within an aperture with radius equal to twice the radius enclosing 90 per cent of the H i mass, values < 0.95 in bold (Sec. 3.2.3).
(8): Angle between the angular momentum vectors of the inner and outer H i disc at 𝑧 = 0 , values > 30◦ in +bold (Sec. 3.2.4). (9): Speed of the galaxy with respect to diffuse gas between 1 and 2 times 𝑟200 at 𝑧 = 0 , speeds > 50 km s−1 in bold (Sec. 3.2.5). +First pericentre of merger +Peak radial bulk +Peak vertical bulk +DM halo +Warp angle +IGM wind speed +Class +Galaxy ID +Q +or interaction (Gyr) +flow (kpc Gyr−1) +flow (kpc Gyr−1) +𝑏/𝑎 +𝜃warp +𝑣wind (km s−1) +1 +AP-L1-V11-3-0 +0.04 +8.4 +−3.9 +−0.1 +0.99 +8◦ +26 +1 +AP-L1-V1-4-0 +0.09 +– +−3.3 +−0.0 +0.99 +8◦ +22 +1 +AP-L1-V4-8-0 +0.11 +– +−3.6 +0.4 +0.96 +9◦ +17 +2 +AP-L1-V6-12-0 +0.13 +– +−6.2 +0.2 +0.98 +6◦ +31 +2 +AP-L1-V6-8-0 +0.14 +10.9 +−2.7 +1.8 +0.99 +3◦ +82 +2 +AP-L1-V1-8-0 +0.14 +– +−5.0 +0.7 +0.97 +7◦ +15 +2 +AP-L1-V6-5-0 +0.15 +10.6 +−5.4 +0.1 +0.99 +8◦ +43 +2 +AP-L1-V10-6-0 +0.16 +– +−7.4 +−1.6 +0.91 +44◦ +27 +2 +AP-L1-V6-19-0 +0.16 +8.9‡ +−0.9 +0.0 +0.97 +13◦ +19 +2 +AP-L1-V11-6-0 +0.17 +– +−6.1 +1.8 +0.88 +5◦ +32 +2 +AP-L1-V10-14-0 +0.17 +8.9, 10.3 +−5.2 +1.6 +0.96 +6◦ +64 +2 +AP-L1-V4-10-0 +0.17 +– +−2.0 +1.3 +0.97 +3◦ +25 +3 +AP-L1-V4-6-0 +0.18 +– +−4.0 +1.0 +0.96 +49◦ +45 +3 +AP-L1-V11-5-0 +0.18 +9.8, 10.3‡, 11.2 +−5.0 +3.4 +0.91 +12◦ +66 +3 +AP-L1-V1-7-0 +0.18 +– +−4.6 +1.2 +0.95 +41◦ +23 +3 +AP-L1-V4-14-0 +0.18 +– +−5.9 +0.1 +0.98 +4◦ +30 +3 +AP-L1-V6-7-0 +0.20 +8.9, 11.5, 12.8‡ +−12.8 +−0.2 +0.87 +51◦ +60 +3 +AP-L1-V10-30-0 +0.20 +9.5 +−4.3 +0.5 +0.98 +4◦ +26 +3 +AP-L1-V6-16-0 +0.22 +10.3 +−4.7 +0.7 +0.96 +17◦ +10 +4 +AP-L1-V6-20-0 +0.23 +– +−10.4 +1.3 +0.84 +14◦ +50 +4 +AP-L1-V6-18-0 +0.24 +– +−8.6 +0.2 +0.90 +9◦ +24 +4 +AP-L1-V10-19-0 +0.26 +8.4, 8.9 +−6.2 +1.9 +0.93 +5◦ +8 +4 +AP-L1-V4-13-0 +0.26 +– +−3.5 +2.0 +0.87 +14◦ +10 +4 +AP-L1-V6-15-0 +0.27 +8.7, 11.5, 13.1 +−6.6 +0.8 +0.91 +3◦ +8 +4 +AP-L1-V10-22-0 +0.29 +– +−5.8 +0.6 +0.99 +11◦ +6 +4 +AP-L1-V6-6-0 +0.30 +10.3‡, 12.1, 13.1‡ +−7.1 +5.7 +0.88 +6◦ +26 +4 +AP-L1-V10-16-0 +0.33 +– +−2.0 +−0.1 +0.93 +10◦ +46 +4 +AP-L1-V10-20-0 +0.34 +11.2, 11.2, 11.2, 11.5, 12.1 +−5.9 +0.9 +0.92 +38◦ +88 +4 +AP-L1-V10-5-0 +0.37 +11.5‡ +−9.8 +1.9 +0.94 +84◦ +33 +4 +AP-L1-V1-6-0 +0.40 +13.4‡ +−7.9 +−0.2 +0.90 +19◦ +40 +4 +AP-L1-V10-17-0 +0.42 +9.5 +−7.4 +2.5 +0.93 +47◦ +31 +4 +AP-L1-V6-11-0 +0.48 +13.4 +−4.7 +1.1 +0.91 +22◦ +26 +4 +AP-L1-V10-13-0 +0.69 +10.0 +−4.6 +4.0 +0.89 +93◦ +26 +and rotation curves; 𝑄 < 0.125), 9 ‘class 2’ (‘good’ agreement; +0.125 ≤ 𝑄 < 0.175), 7 ‘class 3’ (‘fair’ agreement; 0.175 ≤ 𝑄 < +0.225) and 14 ‘class 4’ (‘poor’ agreement; 𝑄 ≥ 0.225). The 𝑄 values +of galaxies are tabluated in Table 1, with rows ordered by increasing +𝑄. The same table also provides a concise summary of various effects +that can (and often do) perturb the rotation curves of the galaxies. +We discuss each in turn in Sec. 3.2, but first give a brief qualitative +overview. +The overall impression that emerges immediately on visual in- +spection of the videos of the galaxies in our sample is one of +rich variety, both in galaxy properties and in the perturbations +that they are undergoing. Whilst selected with a simple criterion: +60 < 𝑣max/km s−1 < 120, there are large galaxies with gas discs +extending nearly 30 kpc in radius, but also tiny galaxies which barely +resemble discs (radii as small as 2 kpc). Some galaxies have obvi- +ous, strong gas outflows, while others are rapidly accreting new gas. 
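(The 𝑄 statistic used for this classification, equations (1)–(4), is simple to evaluate numerically. The sketch below is an illustration only, not the analysis code used for this paper; it assumes hypothetical arrays holding the rotation curves at the last three snapshots and the 𝑧 = 0 circular velocity curve, each sampled every 500 pc from the centre outwards, so that the points common to a pair of curves are simply the innermost shared samples.)

```python
import numpy as np

def q_pair(v_a, v_b):
    """75th percentile of |v_a/v_b - 1| over the sampling points common to both curves."""
    n = min(len(v_a), len(v_b))  # curves sampled every 500 pc from the centre outwards
    return np.percentile(np.abs(v_a[:n] / v_b[:n] - 1.0), 75)

def q_statistic(v_rot0, v_rot1, v_rot2, v_circ0):
    """Q = (4*q0 + 2*q01 + q12) / 7, following equations (1)-(4).

    v_rot0, v_rot1, v_rot2 : hypothetical arrays with the rotation curves at the
        last, second-last and third-last snapshots.
    v_circ0 : circular velocity curve at the last snapshot, same radial sampling.
    """
    q0 = q_pair(v_rot0, v_circ0)   # eq. (2): agreement with the circular velocity curve
    q01 = q_pair(v_rot0, v_rot1)   # eq. (3): snapshot-to-snapshot variability
    q12 = q_pair(v_rot1, v_rot2)   # eq. (4): snapshot-to-snapshot variability
    return (4.0 * q0 + 2.0 * q01 + q12) / 7.0

# Toy usage with synthetic curves (placeholder values only):
radii = np.arange(0.5, 10.0, 0.5)                      # kpc
v_circ = 80.0 * radii / (radii + 2.0)                  # toy circular velocity curve [km/s]
rng = np.random.default_rng(0)
v_rots = [v_circ * (1.0 + 0.05 * rng.standard_normal(radii.size)) for _ in range(3)]
print(q_statistic(v_rots[0], v_rots[1], v_rots[2], v_circ))
```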
There are several instances of galaxies losing the majority of their gas and then accreting a new disc that is highly inclined relative to the previous disc, resulting in a strongly warped disc. Some galaxies are very elongated and/or have frequent lopsided (harmonic of order 𝑚 = 1) perturbations, while others have a long-term stable, circular disc. Mergers and interactions with companions are common, with a range of impact parameters. In many cases the gas merges quickly, while the secondary DM halo completes several orbits before fully merging, visibly disturbing the gas kinematics at each pericentric passage. A few complicated triple mergers are also present in the sample. Other common disruptions include non-merging interactions with gas-rich or gas-less haloes (for our selection in 𝑣max, we do not find any star-less or 'dark' galaxies that significantly perturb the gas kinematics). In some cases a wind from motion through the inter-

[Figure 3 image: a grid of 33 panels, one per galaxy in order of increasing 𝑄 (from AP-L1-V11-3-0 with 𝑄 = 0.04 to AP-L1-V10-13-0 with 𝑄 = 0.69), each labelled with its galaxy ID and 𝑄 value and showing Velocity [km s−1] against Radius [kpc]; axis tick labels omitted here.]

Figure 3.
Rotation curves for all galaxies for the last three simulation snapshots (13.10, 13.43 and 13.76 Gyr, shown in increasing opacity) coloured by class – +class 1 (green, 𝑄 < 0.125); class 2 olive, 0.125 ≤ 𝑄 < 0.175); class 3 (orange, 0.175 ≤ 𝑄 < 0.225); class 4 (red 𝑄 ≥ 0.225). 𝑄 is a measure of how well the +rotation curve traces the circular velocity curve over time (see Sec. 3). The purple curves show the 𝑧 = 0 circular velocity curve of each galaxy. +galactic medium (IGM) seems to cause strong 𝑚 = 1 deformations +of the disc. +3.2 Mechanisms perturbing the rotation curve +3.2.1 Mergers and close companions +Close interactions and mergers with gas-rich companions cause the +most obvious disturbances to rotation curves. Gas-less (but not nec- +essarily dark) companions cause less disruption, but can still visibly +disturb the gas kinematics. In most cases the effect of a gas-less com- +panion on the rotation curve is minimal, and even in the most extreme +cases the rotation curve is usually still a reasonably good tracer of +the circular velocity curve, if no other perturbation is ongoing. Sim- +ilar statements apply to the gas-less remnant of an initially gas-rich +companion as it returns on subsequent orbital passages. We list two +examples of interactions with companions (and all other types of +perturbations discussed in subsections below) and where they can be +most clearly seen in our collection of visualisations in Table 2. +Using the merger trees (Sec. 2.1), we identify all companion galax- +ies that merged into each galaxy in our sample and their progenitors. +In addition, we track the progenitors and descendants of all compan- +ion galaxies, defined as those found in the same FoF group as the +galaxy at any time (but that did not later merge). For each compan- +ion and merged object, we find its maximum DM mass at any time +and compare it to the maximum DM mass of the galaxy of interest, +discarding any with a mass ratio less than 1:20. We found that in- +teraction with smaller mass ratios caused little visible disturbance to +MNRAS 000, 1–15 (2023) + +Rotation curves of low-mass galaxies +7 +Table 2. +Perturbation type +Visualisation file +Time (Gyr) +Description and comments +Merger/companion (§3.2.1) +AP-L1-V11-3-0-gas-edge-and-face.mp4 +10.4 – 12.0 +Merger with the gas disc of a companion. The com- +panion arrives on a prograde orbit nearly in the plane +of the disc around time 10.2 (this is actually the sec- +ond passage, the first was around 8.4 Gyr, at this time +the gas discs interacted but did not collide); the disc +survives and settles (by time ∼ 12.7 Gyr). +Merger/companion (§3.2.1) +AP-L1-V6-15-0-gas-edge-and-face.mp4 +11.2 – 12.0 +Merger with the gas disc of a companion. The com- +panion arrives on an oblique prograde orbit; the disc +is almost completely destroyed and does not re-form +until time ∼ 13.3 Gyr. +Radial inflows (§3.2.2) +AP-L1-V4-14-0-gas-edge-and-face.mp4 +13.0 – 13.5 +Gas is visibly ejected from the disc, likely by a series +of supernova explosions, around time 12.5 Gyr. This +gas quickly begins to settle back onto the disc. While +this is ongoing, the entire disc contracts radially. +Vertical outflows (§3.2.2) +AP-L1-V4-13-0-gas-edge-and-face.mp4 +13.0 – 13.5 +Several prominent whisps of ejected gas are visible +both above and below the disc (in the right panel of +the video), launched over a period of a few hundred +megayears. 
+Elongated halo (§3.2.3) +AP-L1-V11-6-0-face-gas-and-dm.mp4 +all +The DM halo is visibly elongated throughout, driving +transient lopsided (𝑚 = 1 harmonic) and bisymmetric +(𝑚 = 2) deformations of the gas disc. For example, at +time 13.0 Gyr, the disc is both elongated and lopsided. +The position angle of the elongation of the gas disc is +visibly correlated with the position angle of the DM +halo throughout. +Warped disc (§3.2.4) +AP-L1-V4-6-0-gas-edge-and-face.mp4 +13.0 – 13.5 +By 11 Gyr the gas disc is very small after being con- +sumed by star formation and losing gas to supernova +feedback. Between 12 and 13 Gyr a large amount of +gas accretes onto the disc, misaligned with the existing +disc. By 13.5 Gyr the edge-on planes of the inner and +outer discs are clearly visible in the right panel of the +visualisation. +IGM wind (§3.2.5) +AP-L1-V1-4-0-gas-edge-and-face.mp4 +all +Throughout the visualisation the diffuse gas surround- +ing the disc has a noticeable net flow from right to left +in the image, in both the face-on and edge-on views. +the gas discs, and no noticeable perturbation to their rotation curves. +We define the time of the first pericentric passage of an interaction +as the time of the earliest simulation snapshot when both galaxies +are found in the same FoF group and the sign of the radial velocity +difference between the companion and host is positive. +In Table 1, we list the times of first pericentric passages for all +such interactions, excluding those before 8 Gyr. There are 25 in- +teractions in total, occurring in 15 galaxies. All of the tabulated +companions/mergers are initially gas rich – their peak (over time) +gas-to-stellar mass ratios are ≥ 1.7. We also note that all compan- +ions and mergers with mass ratios greater than 1:20 had stars – +perturbations due to ‘dark’ galaxies are unimportant for the galaxies +in our sample. The collision of two gas discs in a 1:20 or greater +merger invariably strongly and globally disturbs the gas morphol- +ogy and kinematics, making any other possible perturbations moot. +We therefore flag ongoing interactions (which may persist long af- +ter the first pericentric passage), defined as those where the closest +approach of the companion occurs during the last 3 simulation snap- +shots (∼ 650 Myr) and is closer than 25 kpc. These are marked ‡ in +Table 1 and shown with open symbols in later figures. +It is clear from Table 1 that galaxies with an ongoing interaction +with a gas-rich companion have rotation curves that are poor trac- +ers of the circular velocity (classes 3 & 4, with one exception in +class 2 where the mass ratio was close to 1:20 to begin with and the +companion has been heavily stripped by the end of the simulation). +However, galaxies may recover quickly (in a little more than a dy- +namical time) from earlier interactions, depending on the mass ratio, +impact parameter, and the relative inclinations of the gas discs. For +example, galaxy AP-L1-V11-3-0 (class 1) finished merging with a +massive (𝑀tot ∼ 3.3×1010 M⊙, dark matter mass ratio ∼ 0.13) com- +panion around 12.5 Gyr (see entry in Table 2 for details), but since +the approach was nearly in the plane of the gas disc, the disruption +of the disc was limited and the gas settled after the merger ended, +such that the rotation curves trace the circular velocity very well by +12.7 Gyr and thereafter. 
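(The identification of first pericentric passages described in this subsection amounts to a simple scan over the merger-tree output. The sketch below is a minimal illustration with hypothetical input arrays standing in for the merger-tree quantities, not the code used for this paper; a contrasting example follows it.)

```python
import numpy as np

def first_pericentre(times, same_fof, v_radial, t_min=8.0):
    """Time of the first pericentric passage of a companion, or None.

    times    : snapshot times [Gyr] (hypothetical merger-tree output).
    same_fof : boolean array, True where companion and host share a FoF group.
    v_radial : radial velocity of the companion relative to the host; a positive
               value means the pair is separating, i.e. pericentre has just passed.
    t_min    : passages before this time are ignored (8 Gyr, as in Table 1).
    """
    candidate = same_fof & (v_radial > 0.0) & (times >= t_min)
    idx = np.flatnonzero(candidate)
    return float(times[idx[0]]) if idx.size else None

def is_significant(peak_dm_mass_companion, peak_dm_mass_host):
    """Keep only companions whose peak DM mass ratio exceeds 1:20."""
    return peak_dm_mass_companion / peak_dm_mass_host >= 1.0 / 20.0
```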
On the other hand, galaxy AP-L1-V6-15-0 (class 4) experienced an oblique collision with a galaxy (𝑀tot ∼ 4.3 × 10⁹ M⊙, dark matter mass ratio ∼ 0.15), dispersing nearly all of the primary galaxy's gas on its first approach (∼ 11.5 Gyr), before the now partially stripped, but still gas-rich, companion halo returns and the rest of the gas merges (∼ 12.2 Gyr). This dramatic event severely disrupts both the gas disc and DM halo, and the gas dynamics are entirely out of equilibrium until ∼ 13.0 Gyr, where they begin to settle. By 𝑧 = 0 the rotation curve still underestimates the circular velocity overall and has radially localised features (e.g. 'wiggles').

[Figure 4 image: two panels plotting the peak bulk radial inflow rate [kpc Gyr−1] (upper) and the peak bulk vertical outflow rate [kpc Gyr−1] (lower) against 𝑄, with 'weak bulk flows', 'strong bulk flows' and 'ongoing interaction with companion' indicated; axis tick labels omitted here.]

Figure 4. Correlations of bulk gas flows with the degree to which the rotation curve traces the circular velocity curve, 𝑄. Upper panel: The average radial velocity (in cylindrical coordinates) of 'atomic' gas particles in each galaxy in our sample is calculated at each of the last 3 snapshots (13.10, 13.43 and 13.76 Gyr), and the minimum value (i.e. peak inflow rate) is plotted on the vertical axis. Lower panel: The average vertical velocity away from the disc midplane (i.e. sgn(𝑧)𝑣𝑧) of 'atomic' gas particles in each galaxy in our sample is calculated at each of the last 3 snapshots, and the maximum value (i.e. peak outflow rate) is plotted on the vertical axis. Galaxies currently strongly interacting with a companion (marked ‡ in Table 1) are plotted with open symbols. Galaxies with stronger bulk flows have preferentially higher 𝑄 values. Galaxies with peak radial inflow rates stronger (more negative) than −5 kpc Gyr−1 and/or peak vertical outflow rates greater than 1 kpc Gyr−1 (red dashed lines; galaxies with flows stronger than either or both limits are shown with red markers) are not found in class 1 (green background), and no galaxies without strong bulk flows are found in class 4 (red background).

3.2.2 Bulk non-circular gas flows

Bulk non-circular gas flows (e.g. radial or vertical flows) directly violate the assumption of rotational support implied by the expectation that the rotation curve of a galaxy should agree with its circular velocity curve. Bulk outflows in low-mass galaxies in APOSTLE are driven predominantly by the injection of thermal energy by supernovae and are preferentially ejected along the 'path of least resistance': vertically from the disc (see Table 2 for an example). Bulk inflows within the disc, on the other hand, tend to be radial and are usually associated with gas accretion (see example in Table 2).

We quantify bulk non-circular gas flows as follows. We focus on the atomic gas disc by first selecting only 'atomic' gas particles, which we recall we define as those with H i mass fractions of > 0.5, and then selecting only those particles within a cylindrical aperture with a radius equal to the radius enclosing 90 per cent of the H i mass of the galaxy, and a half-height equal to the half-height enclosing 90 per cent of the H i mass. We calculate the radial (in cylindrical coordinates) and vertical bulk flow rates of the selected particles as their mass-weighted average radial and vertical velocities.
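(As an illustration of this measurement, and not the pipeline used here, the following sketch computes both bulk flow rates from hypothetical particle arrays given in a frame centred on the galaxy with the 𝑧-axis along the disc spin; the vertical rate uses the speed away from the midplane, sgn(𝑧)𝑣𝑧, as described next.)

```python
import numpy as np

def bulk_flow_rates(pos, vel, mass, r90, z90):
    """Mass-weighted bulk radial and vertical flow rates of the atomic disc.

    pos, vel : (N, 3) positions [kpc] and velocities [kpc/Gyr] of 'atomic' gas
               particles in a frame centred on the galaxy, z along the disc spin
               (hypothetical inputs).
    mass     : (N,) particle masses.
    r90, z90 : radius and half-height enclosing 90 per cent of the HI mass,
               defining the cylindrical aperture.
    """
    R = np.hypot(pos[:, 0], pos[:, 1])
    sel = (R > 0.0) & (R < r90) & (np.abs(pos[:, 2]) < z90)
    p, v, m, R = pos[sel], vel[sel], mass[sel], R[sel]

    v_R = (p[:, 0] * v[:, 0] + p[:, 1] * v[:, 1]) / R   # cylindrical radial velocity
    v_away = np.sign(p[:, 2]) * v[:, 2]                  # sgn(z) * v_z, positive = away from midplane

    radial_rate = np.average(v_R, weights=m)             # negative values indicate net inflow
    vertical_rate = np.average(v_away, weights=m)        # positive values indicate net outflow
    return radial_rate, vertical_rate                    # in kpc/Gyr if vel is given in kpc/Gyr
```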
For the vertical flow +rate, we use the speed towards or away from the disc midplane +(i.e. sgn(𝑧)𝑣𝑧). Calculating these flow rates for a few consecutive +simulation snapshots revealed that they are highly time-variable, +motivating us to choose a summary statistic. The peak (most negative) +radial inflow rates and peak (most positive) vertical outflow rates +from the last 3 snapshots (13.10, 13.43 and 13.76 Gyr) are plotted in +Fig. 4 against the 𝑄 parameter defined in equation (1). To emphasize +that these flow rates capture a global contraction/expansion of the +disc rather than e.g. the speed of gas selected to be ‘outflowing’ or +‘inflowing’, we show values in units of kpc Gyr−1 (rather than e.g. +km s−1). We also note that ‘harmonic’ non-circular motions, such +as a bar-like distortion of the gas orbits, are not captured in this +measurement because such distortions do not result in a net transport +of gas. +Fig. 4 shows that galaxies with stronger inflows and/or outflows +tend to have rotation curves that are poorer tracers of their circu- +lar velocity curves. We illustrate this by plotting galaxies with radial +inflow rates stronger (more negative) than −5 kpc Gyr−1 and/or verti- +cal outflow rates greater than 1 kpc Gyr−1 (approximately the median +flow rates for galaxies in our sample) with red markers. Entries in +Table 1 exceeding these thresholds are also highlighted in bold face. +By this measure, most galaxies in our sample (26/33) have strong +bulk flows in at least one of these two directions, but no galaxies in +our class 1 (𝑄 < 0.125) do. There is furthermore a clear correlation +between each of the two peak flow rates and 𝑄, albeit with large +scatter. +The connection between bulk flows, in the vertical direction in +particular, was one of the first that we noticed in our initial visual +analysis of our collection of videos: a lack of visible outflows from +a galaxy is a strong predictor that its rotation curve will be a good +tracer of its circular velocity curve. However, given the diversity +of perturbations which can cause rotation curves to differ from the +circular velocity curve, having weak bulk flows does not guarantee +this to be the case, as is evident from exceptions such as AP-L1-V10- +16-0. This galaxy has amongst the weakest bulk flows in our sample, +but falls in class 4 (𝑄 ≥ 2.225). +3.2.3 Dark matter halo shape +Elongated or triaxial DM haloes give rise to non-circular gas orbits, +with gas often visibly sloshing around in the aspherical potential +(see Table 2 for an example). In galaxies where this mechanism is +effective, the rotation curves are highly variable as strong, transient +lopsided (harmonic of order 𝑚 = 1) and bisymmetric (𝑚 = 2) modes +are excited in the gas disc. Fig. 5 shows the anti-correlation between +the intermediate-to-major axis ratio of the DM halo and the 𝑄 pa- +rameter defined in equation (1). We focus on the shape of the halo +in the region occupied by the disc by calculating axis ratios using +DM particles in a spherical aperture with a radius equal to twice the +radius enclosing 90 per cent of the galaxy’s H i mass. The squares +of the axis lengths are proportional to the eigenvalues of the reduced +inertia tensor: +𝐼𝑖 𝑗 = +� +𝑛 𝑚𝑛 +𝑟𝑛,𝑖𝑟𝑛, 𝑗 +𝑟2𝑛 +� +𝑛 𝑚𝑛 +, +(5) +where 𝑟𝑛 and 𝑚𝑛 are the coordinate vector and mass of particle 𝑛, +respectively. +Even very small departures from 𝑏/𝑎 = 1 seem to be sufficient to +drive large changes in the rotation curves. The red dashed line in Fig. 
5 marks 𝑏/𝑎 = 0.95 – no galaxies with 𝑏/𝑎 < 0.95 fall in our class 1 (𝑄 < 0.125), and all save one of the class 4 (𝑄 ≥ 0.225) galaxies have 𝑏/𝑎 < 0.95. We highlight the entries for galaxies with 𝑏/𝑎 < 0.95 in Table 1 – these make up 17 of the 33 galaxies in our sample. The anti-correlation in the figure has considerable scatter, reflecting the fact that a galaxy with a spherical halo can be perturbed by some other mechanism, but an aspherical halo seems to be a strong predictor of the rotation curve being a poor tracer of the circular velocity curve.

Figure 5. Anti-correlation between the DM halo intermediate-to-major axis ratio 𝑏/𝑎 (measured from the reduced inertia tensor of DM particles within a spherical aperture with radius equal to twice the radius enclosing 90 per cent of the galaxy's H i mass) and the degree to which the rotation curve traces the circular velocity curve, 𝑄. An aspherical halo (𝑏/𝑎 ≲ 0.95, marked by the dashed red line and in bold in Table 1) is a strong predictor of poor agreement between the rotation curve and the circular velocity curve, but a spherical halo does not guarantee close agreement. The coloured background marks the same intervals in 𝑄 as introduced in Fig. 3.

Although not shown in Fig. 5, we also investigated trends in 𝑄 as a function of the minor-to-major axis ratio (𝑐/𝑎) and the triaxiality parameter, 𝑇 ≡ (𝑎² − 𝑏²)/(𝑎² − 𝑐²). These show somewhat weaker trends than that with 𝑏/𝑎, suggesting that a prolate or triaxial halo shape (i.e. 𝑏/𝑎 ≠ 1) has a stronger perturbative effect than an oblate shape (𝑏/𝑎 ∼ 1). This agrees with intuition: a light, rotationally supported disc has possible stable configurations in the potential of an oblate halo, but is unstable in a prolate or triaxial potential.

The mass of the gas disc also plays a role. A more massive disc may resist the perturbative effect of an aspherical halo, or even 'sphericalise' the halo if it is massive enough. We will return to the importance of the gas disc mass in Sec. 3.3 below.

3.2.4 Warped discs

Several galaxies in our sample have visible warps in their gas discs; one example of a prominent warp is listed in Table 2. We quantify the strength of a warp by the angle 𝜃warp between the angular momentum vectors of the inner and outer gas discs, which we define as the inner 30 per cent and outer 60–90 per cent of the H i gas by mass. We plot 𝜃warp against our 𝑄 parameter defined in equation (1) in Fig. 6. Most galaxies in our sample have warp angles of less than ∼ 20◦ and these span the entire range in 𝑄, but a minority have large warp angles caused by rapid accretion of gas with angular momentum strongly misaligned with the existing disc, or an interaction with a companion – these galaxies have preferentially higher 𝑄 values and fall in our classes 3 & 4 (𝑄 ≳ 0.175). We flag galaxies with 𝜃warp > 30◦ (dashed line in the figure) as strongly warped, and highlight their entries in Table 1 in bold face. There are 8 such galaxies in our
sample. We note that the influence of a warp on the rotation curve is somewhat exaggerated in our analysis because we have measured the rotation curves as the median azimuthal velocity of particles in a fixed plane aligned with the inner disc. The rotation speed in a warped outer disc is therefore underestimated by about a factor of cos 𝜃warp, which will be reflected in the measured 𝑄 value.

Figure 6. Correlation between warp angle 𝜃warp (defined as the angle between the angular momentum vectors of the inner 30 per cent and the outer 60–90 per cent of the H i gas, by mass) and the degree to which the rotation curve traces the circular velocity curve, 𝑄. A strong warp (≳ 30◦, marked by the dashed red line and in bold in Table 1) is associated with a poor agreement between the rotation and circular velocity curves (𝑄 ≳ 0.2). The coloured background marks the same intervals in 𝑄 as introduced in Fig. 3.

3.2.5 IGM wind

In the visualisations of some of our galaxies, a 'wind' blowing against the gas disc due to its motion through the IGM is clearly visible, and appears to deform the disc, often resulting in a lopsided disc displaced 'downwind'. One example where this effect is especially clear is given in Table 2. We note, however, that the galaxy in question is in class 1 (𝑄 = 0.09) and has an IGM wind speed at 𝑧 = 0 (see below for details) close to the median in our sample of galaxies. This highlights both the difficulty in quantifying the strength of the wind and its potentially quantitatively subtle effect on the kinematics of the gas disc, despite the perturbative effect of the wind being visually very clear. As a consequence, our efforts to quantify such perturbations have yielded less clear-cut results than for the other types of perturbations discussed above, suggesting that perturbation due to a wind may be more nuanced. The effect on the rotation curve likewise often seems to be fairly subtle.

We estimate the speed of the IGM wind as follows. We first select gas particles in a spherical shell between one and two times the virial radius4 around the galaxy. In order to avoid undue bias by other nearby galaxies, we further restrict our selection to include only those particles not gravitationally bound to any subhalo according to the halo finder. We take the median velocity in the rest frame of the galaxy (the same frame used when measuring the rotation curves) of the remaining selected particles to be the IGM wind velocity.

4 We define the virial radius as the radius of a sphere within which the mean matter density is 200 times the critical density, 𝜌crit = 3𝐻0²/(8𝜋𝐺).

Figure 7. IGM wind speed 𝑣wind, calculated as the median velocity of the gas particles between 1 and 2 times 𝑟200 of the galaxy and not belonging to any FoF group, in a frame of reference where the gas disc is at rest (see Sec. 3.2.5 for details), plotted against the degree to which the rotation curve traces the circular velocity curve, 𝑄 (Sec. 3.1). No strong trend is visible. The dashed red line marks 50 km s−1; we consider galaxies above this line to be the strongest outliers in 𝑣wind, and mark the corresponding values in bold in Table 1. The coloured background marks the same intervals in 𝑄 as introduced in Fig. 3.

We have
+verified that the conclusions that we reach are not very sensitive to +the precise radial range used (within a factor of about 3), or whether +or not bound particles are included. +The speed of the wind 𝑣wind is plotted against the 𝑄 parameter +defined in equation (1) in Fig. 7. Any correlation is less clear than +those seen in Figs. 4–6 above, but there is tentative evidence that the +galaxies with the highest IGM wind speeds in our sample have higher +𝑄 values, or at least avoid the lowest 𝑄 values. We draw a boundary +at 50 km s−1 (dashed line in the figure) separating the galaxies with +the highest wind speeds (5 of the 33 in our sample) from the others, +and highlight the entries corresponding to the galaxies above this +threshold in Table 1. +In addition to the caveats listed above, we are cautious in our +interpretation of perturbations due to the IGM wind because we +struggle to find a clear correspondencebetweenthegalaxies where we +identified what appeared to be a wind in our visualisations and those +with a high wind speed (or other similar quantitative measures that we +explored). Furthermore, our impression from our visual inspection is +that periods of strong IGM wind are often short-lived (the example in +Table 2 is an exception to this), and any perturbation of the rotation +curve does not seem to persist after the wind subsides. +3.2.6 Summary +Taking all of these various kinds of perturbations into account, it +is perhaps unsurprising that so few galaxies in our sample have +a rotation curve that closely traces their circular velocity curve at +𝑧 = 0. However, looking at Table 1, there are also a few galaxies that +have avoided any obvious recent disturbance and yet have rotation +curves that are poor tracers of their circular velocity curves. The +galaxy AP-L1-V10-30 presents an intriguing case. The visualisations +(e.g. AP-L1-V10-30-gas-edge-and-face.mp4) do not reveal any +obvious perturbations in any of the categories discussed above at late +times, except perhaps some vertical outflows from the disc. Inspecting +its entry in Table 1, none of its properties exceed our (admittedly +somewhat arbitrary) thresholds for strong perturbations. And yet, +its late-time rotation curve (Fig. 3) significantly underestimates the +circular velocity curve in the central ∼ 2 kpc, and is time-variable +in the outer regions of the disc. The presence of such an example in +our sample of galaxies emphasizes that we have only scratched the +surface of a complex topic: it is clear that many types of perturbations +significantly influence the gas kinematics in low-mass galaxies, but +a more complete understanding of the prevalence and importance +of each type will require further study, often on a galaxy-by-galaxy +basis. +Ultimately, given the intrinsically limited information available +from observations of real galaxies, a practical question to ask is: are +galaxies where the rotation curve is a good tracer of the circular ve- +locity curve separated from others in terms of observable properties? +In our exploration of our sample of galaxies, it became clear very +quickly that gas mass (e.g. at fixed stellar mass) plays an important +role. Once galaxies with ongoing interactions or mergers are removed +from consideration, galaxies with higher gas mass are more likely +to have rotation curves that trace the circular velocity well, and vice +versa (see further discussion in Sec. 3.3 below). 
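(Two of the quantitative indicators collected in Table 1, the halo axis ratio 𝑏/𝑎 from the reduced inertia tensor of equation (5) and the warp angle 𝜃warp of Sec. 3.2.4, are likewise straightforward to compute from particle data. The sketch below is a minimal illustration with hypothetical inputs, particles pre-centred on the galaxy and, for the warp angle, sorted by radius; it is not the APOSTLE analysis code.)

```python
import numpy as np

def axis_ratio_b_over_a(pos, mass):
    """b/a from the reduced inertia tensor of equation (5).

    pos  : (N, 3) DM particle positions relative to the galaxy centre, already
           restricted to the aperture of interest (hypothetical input).
    mass : (N,) particle masses.
    """
    r2 = np.einsum('ij,ij->i', pos, pos)
    keep = r2 > 0.0
    pos, mass, r2 = pos[keep], mass[keep], r2[keep]
    w = mass / r2                                          # 1/r^2 weighting of eq. (5)
    inertia = np.einsum('i,ij,ik->jk', w, pos, pos) / mass.sum()
    eig = np.sort(np.linalg.eigvalsh(inertia))             # ascending; proportional to c^2, b^2, a^2
    return np.sqrt(eig[1] / eig[2])

def warp_angle(pos, vel, m_hi):
    """Angle [deg] between the angular momenta of the inner 30% and the outer
    60-90% of the HI gas by mass; particles assumed sorted by radius (hypothetical inputs)."""
    frac = np.cumsum(m_hi) / m_hi.sum()
    inner = frac <= 0.3
    outer = (frac > 0.6) & (frac <= 0.9)
    L_in = (m_hi[inner, None] * np.cross(pos[inner], vel[inner])).sum(axis=0)
    L_out = (m_hi[outer, None] * np.cross(pos[outer], vel[outer])).sum(axis=0)
    cosang = np.dot(L_in, L_out) / (np.linalg.norm(L_in) * np.linalg.norm(L_out))
    return float(np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0))))
```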
The importance of gas mass is tentatively consistent with the stabilising effects of a massive disc against some perturbations, including the influence of an aspherical DM halo (the type of perturbation with the clearest effect out of those that we investigated, save mergers), as mentioned above.

3.3 Trends with galaxy scaling relations

We plot each pair-wise relation between gas mass, stellar mass, and 𝑣max at 𝑧 = 0 in Fig. 8 (tabulated values are available in Oman et al. 2019, table A1). We also plot the data from the SPARC compilation (Lelli et al. 2016a) with small grey points5. The simulated galaxies broadly follow observed trends in these relations; we do not discuss the comparison further (see Oman et al. 2019, for a detailed comparison). Points for simulated galaxies are coloured by their class, from green (class 1) to red (class 4). We plot galaxies with a recent merger or interaction with a companion (see Sec. 3.2.1) with an open symbol. Considering the lower panels, we highlight that all non-interacting galaxies with gas mass greater than 1.5 × 10⁹ M⊙ (9 galaxies) are in class 1 or 2, while only 2/17 non-interacting galaxies with lower gas masses are. We note that galaxies with recent/ongoing interactions or mergers have preferentially higher gas masses (at fixed 𝑣max) than non-interacting galaxies – this is unsurprising, since all companion galaxies of galaxies in our sample bring a lot of gas with them (see Sec. 3.2.1). There is no similarly clear separation between points of different colours in maximum circular velocity or stellar mass, besides some weak trends coming from the fact that gas mass correlates with both of these parameters.

It is tempting to attribute gas mass being more important than stellar mass in this context to the galaxies in our sample having gas masses exceeding their stellar masses, such that the gravitational influence of the stars on the gas kinematics is not dominant. However, the gas is typically more extended, so the stars can still dominate the gravitational potential near the centre, and can exert a strong non-gravitational influence through supernova feedback. The strength of supernova feedback would be expected to correlate instead with recent star formation, but we did not find any strong trend with recent star formation rate (not shown).

5 The SPARC compilation provides a 'quality flag' from 1 (best) to 3 (not suitable for mass modelling). Since in this work we are interested in galaxies spanning the full range in rotation curve quality, we include all SPARC galaxies in all figures where they are shown.

Figure 8. Pair-wise relations between gas mass, stellar mass, and 𝑣max, with APOSTLE galaxies plotted as larger points and coloured by class (class 1 with 𝑄 < 0.125 as green, class 2 with 0.125 ≤ 𝑄 < 0.175 as olive, class 3 with 0.175 ≤ 𝑄 < 0.225 as orange, class 4 with 𝑄 ≥ 0.225 as red) and open points showing galaxies with a recent gas-rich merger. SPARC data from Lelli et al. (2016a) are plotted with small grey points. The areas outside of our selection 60 < 𝑣max/km s−1 < 120 are shaded in the right panels.

Considering the lower-right panel of Fig.
8, we were surprised not +to find a stronger dependence on a combination of 𝑣max and 𝑀gas, as +might be expected if the gas-to-total mass ratio was a primary driver +of 𝑄. We note, however, that a larger sample of simulated galaxies +would be very helpful in exploring these issues further. +We next turn our attention to the possible biases introduced into ob- +servational scaling relations involving rotation curve measurements +by the types of perturbations discussed above. We emphasize that we +investigate here only the ‘direct’ impact due to the difference between +the rotation curve and the circular velocity curve – the rotation curves +that we consider are those that an observer with perfect knowledge of +the gas kinematics would measure: the median azimuthal velocity as a +function of radius. With real observations, this direct impact is likely +to be compounded by additional errors induced by e.g. attempting +to model a non-equilibrium system assuming equilibrium dynamics, +assuming circular orbits when the actual orbits are non-circular, etc. +(see e.g. Read et al. 2016; Oman et al. 2019; Sellwood et al. 2021; +Roper et al. 2022). We consider the BTFR (McGaugh et al. 2000), +and 𝑉fid − 𝑉max relation (Santos-Santos et al. 2020) quantifying the +shapes of rotation curves, as illustrative examples. +3.3.1 The BTFR +The upper panel of Fig. 9 shows the BTFR of the galaxies in our +sample, along with galaxies from the SPARC compilation (Lelli +et al. 2016a) and the BTFR of SPARC galaxies reported by Lelli +et al. (2016b) to provide context. We do not undertake a comparison +with the observed BTFR in this work, as this has previously been +addressed by Oman et al. (2016) and Sales et al. (2017). In this panel, +the horizontal axis shows 𝑣max, the maximum of the circular velocity +curve. This can be thought of as the ‘truth’ that is obtained in the ideal +case where the gas rotation curve follows the circular velocity curve +and the measurement of the rotation curve is without error. We also +plot an indicative linear fit to the points in this panel as a black solid +line, excluding interacting/merging galaxies (open symbols) from the +calculation. The fit minimizes the sum of the squared offsets in 𝑀bar +from the BTFR. The best-fitting slope (𝑀bar ∝ 𝑣𝛼max) is 𝛼 = 3.9. +In the centre panel of Fig. 9, the coloured points show a mea- +surement of the maximum rotation velocity of the gas, determined +MNRAS 000, 1–15 (2023) + +12 +E. R. Downing & K. A. Oman +from the flat portion of the rotation curve following the approach of +Roper et al. (2022, Appendix C; if the rotation curve is still rising +at the outermost point, the value at this point is used) – we label +this 𝑣flat. Each point is joined to its position in the upper panel by a +solid line. Unsurprisingly, galaxies in our classes 3 & 4 move further +(on average) from their positions in the upper panel than those in +our classes 1 & 2. Furthermore, nearly all points shift to the left, as +the rotation curves preferentially underestimate the circular velocity +curves. This is emphasized in the lower panel of the figure, where the +ratio 𝑣flat/𝑣max is plotted against 𝑣max – here it is clear that the un- +derestimates get systematically worse towards lower 𝑣max (or lower +𝑀bar). The 𝑣max and 𝑣flat values for each galaxy in our sample are +tabulated in Table 3. +The trends evident in the bottom panel of Fig. 
9 mean that the +BTFR is biased to a higher normalisation (because 𝑣flat systemati- +cally underestimates 𝑣max), and shallower slope (because the under- +estimates get worse at lower 𝑣max). The change in slope is illustrated +in the centre panel by the dashed line, which has a slope of 𝛼 = 3.1. +This shows a linear fit to the filled points in this panel, similar6 to +the solid line (repeated in the upper and centre panels). +In the APOSTLE simulations, the BTFR has a steep cutoff around +𝑣max = 50 km s−1 (see Oman et al. 2016; Sales et al. 2017). Replacing +𝑣max with 𝑣flat seems to soften the cutoff, potentially enough for the +trend to become more reminiscent of the constant slope often claimed +in observational studies (e.g. McGaugh et al. 2000; Ponomareva et al. +2018; Lelli et al. 2019), although an analysis including galaxies at +lower 𝑣max would be needed to confirm this. +If observed galaxies are subject to broadly similar perturbations +as those that we observe in our simulations, which seems likely, +then the observed BTFR is probably biased in a similar sense. The +magnitude of the effect, however, depends on the details of each type +of perturbation and their relative frequencies, which the simulations +may not capture in full detail. +Interestingly, attempting to remove galaxies where the rotation +curve does not trace the circular velocity curve from a sample to +be used to measure the BTFR likely still results in a bias, because +those galaxies where the rotation curve is a good tracer of the circular +velocity curve are not an unbiased sub-sample: they tend to be the +most gas-rich galaxies and to have higher 𝑀bar at fixed 𝑣max (see +discussion of Fig. 8). There is observational evidence for such a bias: +Papastergis et al. (2016) found that using a sample selected to be +extremely gas-rich (𝑀gas/𝑀★ ≳ 2.7) yields a steeper slope for the +BTFR 𝑀bar ∝ (𝑊/2)𝛼 as a function of H i line width 𝑊 than studies +with less extreme selections (e.g. Zaritsky et al. 2014; Hall et al. 2012; +McGaugh 2012). They find 𝛼 = 3.75±0.11 (rather than 𝛼 ∼ 3.3–3.4). +Ball et al. (2022) similarly find that restricting their sample to gas- +rich (𝑀HI/𝑀★ > 2) galaxies significantly increases the slope of the +BTFR, from about 3.3 to 3.9. They also find that dividing their galaxy +into high- and low-baryonic mass sub-samples (at 𝑀bar = 1010 M⊙ +gives different slopes, of 2.9 and 4.1, respectively. However, Gogate +et al. (2022) instead find no strong dependence on gas fraction. All +of these studies use spatially-integrated spectral line widths for the +velocity axis of the BTFR. Searching for similar trends when spatially +resolved rotation curves are used instead is an interesting avenue for +future studies. +6 We have constrained the fit to intersect that from the upper panel at 𝑣max = +120 km s−1, loosely motivated by the BTFR being best constrained around +this maximum circular velocity. Without this constraint, the best-fitting line +has a much shallower slope that we attribute to the sparse sampling at higher +𝑣flat. +Table 3. For each galaxy in our sample: maximum circular velocity (𝑣max); ra- +tio of flat value of the rotation curve (𝑣flat, see Sec. 3.3.1) and 𝑣max; amplitude +of the circular velocity curve at the ‘fiducial radius’ (𝑣fid,circ, see Sec. 3.3.2); +ratio of the amplitude of the rotation curve at the ‘fiducial radius’ (𝑣fid,rot) +and 𝑣fid,circ. Galaxies are in order of increasing 𝑄, as in Table 1. 
+𝑣max +𝑣flat +𝑣max +𝑣fid,circ +𝑣fid,rot +𝑣fid,circ +Class +Galaxy ID +(km s−1) +(km s−1) +1 +AP-L1-V11-3-0 +118 +0.96 +103 +0.89 +1 +AP-L1-V1-4-0 +91 +0.97 +73 +0.89 +1 +AP-L1-V4-8-0 +69 +0.97 +48 +0.75 +2 +AP-L1-V6-12-0 +76 +0.95 +63 +0.75 +2 +AP-L1-V6-8-0 +76 +0.91 +57 +0.86 +2 +AP-L1-V1-8-0 +68 +0.95 +52 +0.99 +2 +AP-L1-V6-5-0 +89 +1.07 +76 +0.84 +2 +AP-L1-V10-6-0 +103 +0.81 +80 +0.73 +2 +AP-L1-V6-19-0 +61 +0.93 +45 +1.09 +2 +AP-L1-V11-6-0 +88 +0.93 +71 +0.54 +2 +AP-L1-V10-14-0 +65 +0.81 +50 +0.84 +2 +AP-L1-V4-10-0 +66 +0.88 +45 +0.62 +3 +AP-L1-V4-6-0 +86 +0.91 +78 +0.77 +3 +AP-L1-V11-5-0 +91 +0.79 +69 +0.79 +3 +AP-L1-V1-7-0 +72 +0.93 +62 +0.83 +3 +AP-L1-V4-14-0 +60 +1.09 +43 +1.04 +3 +AP-L1-V6-7-0 +68 +0.82 +43 +0.65 +3 +AP-L1-V10-30-0 +61 +0.88 +48 +0.75 +3 +AP-L1-V6-16-0 +65 +0.94 +54 +0.68 +4 +AP-L1-V6-20-0 +68 +0.91 +59 +0.98 +4 +AP-L1-V6-18-0 +62 +0.78 +55 +0.65 +4 +AP-L1-V10-19-0 +67 +1.01 +47 +0.71 +4 +AP-L1-V4-13-0 +65 +0.92 +52 +0.59 +4 +AP-L1-V6-15-0 +62 +0.88 +44 +0.58 +4 +AP-L1-V10-22-0 +65 +0.75 +47 +0.48 +4 +AP-L1-V6-6-0 +67 +1.05 +46 +0.84 +4 +AP-L1-V10-16-0 +75 +0.92 +53 +0.92 +4 +AP-L1-V10-20-0 +73 +1.02 +63 +0.41 +4 +AP-L1-V10-5-0 +109 +0.97 +97 +0.90 +4 +AP-L1-V1-6-0 +60 +0.62 +44 +0.64 +4 +AP-L1-V10-17-0 +65 +0.60 +47 +0.39 +4 +AP-L1-V6-11-0 +60 +0.96 +48 +-0.08 +4 +AP-L1-V10-13-0 +84 +0.60 +77 +0.57 +3.3.2 The 𝑣fid − 𝑣max relation +Santos-Santos et al. (2020) adapted a relation introduced by Oman +et al. (2015) that relates the maximum rotation speed (or circular +velocity) and the rotation speed (or circular velocity) at an inner +radius, 𝑣fid = 𝑣(𝑟fid). The radius 𝑟fid ≡ (𝑣max/70 km s−1) 2 kpc is +defined to adapt to the scale of each galaxy. This quantifies the shape +of the rotation curve or circular velocity curve: a more slowly rising +curve (i.e. a rotation curve with a shallow inner slope) has a lower +𝑣fid at a given 𝑣max than a more steeply rising curve. +We plot this relation for our sample of simulated galaxies in the +upper panel of Fig. 10, here using 𝑣fid,circ and 𝑣max measured from +their circular velocity curves. We also plot measurements from the +compilation of Santos-Santos et al. (2020) for context (see Oman +et al. 2019; Santos-Santos et al. 2020; Roper et al. 2022, for further +discussion of the comparison). +In the second panel, we plot the locations of our sample of galaxies +in the same space, but measured from their 𝑧 = 0 rotation curves +(𝑣flat is measured as in Fig. 9). The values corresponding to each +plotted point for all galaxies in our sample are tabulated in Table 3. +MNRAS 000, 1–15 (2023) + +Rotation curves of low-mass galaxies +13 +In addition to the tendency for rotation curves to underestimate the +maximum circular velocity as discussed above, 29 of the 33 galaxies +in our sample have a significantly lower rotation velocity than circular +velocity at 𝑟fid, with the effect being severe for most class 4 galaxies. +In some cases the shape of the rotation curve is broadly preserved +(displacements parallel to the solid grey line in the second panel), +while in others the rotation curve rises much more slowly (vertical +displacement downwards). Interestingly, the resulting scatter in the +space of 𝑣fid−𝑣flat is not dissimilar from that observed for the SPARC +galaxies, with even some similarly extreme outliers. 
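(The fiducial-radius measurement itself is simple; as a minimal illustration, with hypothetical inputs and not the code used for this paper, 𝑟fid and the corresponding velocity can be obtained by interpolating a sampled curve, as in the sketch below.)

```python
import numpy as np

def v_at_fiducial_radius(radii, velocities, v_max):
    """Velocity at r_fid = (v_max / 70 km/s) * 2 kpc, interpolated from a sampled
    curve (radii in kpc, velocities in km/s); hypothetical inputs."""
    r_fid = (v_max / 70.0) * 2.0
    return float(np.interp(r_fid, radii, velocities))

# Toy example (placeholder values): a curve with v_max of about 80 km/s.
radii = np.arange(0.5, 15.0, 0.5)
curve = 80.0 * radii / (radii + 1.5)
print(v_at_fiducial_radius(radii, curve, curve.max()))
```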
We caution, +however, that in practice rotation curve measurements do not recover +the median azimuthal velocity as a function of radius exactly but are +subject to various systematic errors in modelling, especially in their +central regions (Oman et al. 2019). The scatter in the lower panel of +Fig. 10 is therefore likely a lower bound on what would be obtained +were these simulated galaxies ‘observed’ and modelled analogously +to real galaxies – as is confirmed by Oman et al. (2019) for a subset +of the galaxies in our sample. The discrepancy between the rotation +curves of low-mass galaxies and their circular velocity curves may +be a significant contributor to the diversity in the shapes of observed +dwarf galaxy rotation curves highlighted by Oman et al. (2015). +4 CONCLUSIONS +4.1 Summary +That the cold gas in some observed galaxies is out of equilibrium and +is therefore a poor dynamical mass tracer is well known. However, just +how rare it may be that an atomic gas rotation curve can reasonably be +interpreted as a circular velocity curve has not been previously been +systematically explored. Our visualisations of the gas kinematics of +low-mass APOSTLE galaxies (60 < 𝑣max/km s−1 < 120) over the +past ∼ 4 Gyr emphasize the wide variety of processes perturbing +them. +Only about a third (12/33) of the galaxies in our sample have +rotation curves that we would describe as similar to their circular +velocity curves, with examples of close matches being rarer still +(3/33). These are found at preferentially higher gas masses (𝑀gas ≳ +1.5 × 109 M⊙). Based on our visual inspection of galaxies and their +recent history, the most frequent types of perturbations include: +• Mergers and interactions with gas-rich companion galaxies +(6/33). +• Bulk radial gas inflows, likely driven by accretion (19/33), and +vertical gas outflows, likely driven by supernovae (15/33). +• Prolate or triaxial DM halo shapes (17/33). +• Warps (8/33). +• Winds due to motion through the IGM (5/33). +The fractions in parentheses indicate the fraction of galaxies in our +sample that exceed the thresholds for ‘strong perturbations’ of the +given type outlined in Secs. 3.2.1–3.2.5 (entries in bold face in Ta- +ble 1). Only 5/33 galaxies in our sample avoid ‘strong’ perturbations +in all of these categories at 𝑧 = 0. +Some of these types of perturbations (e.g. mergers) are readily +identified observationally, such that the galaxy in question can be +excluded from samples for kinematic analysis, but others (e.g. IGM +wind, influence of triaxial DM halo) are much more subtle. Further- +more, because susceptibility to perturbation correlates with galaxy +properties such as total cold gas mass, omitting perturbed galaxies +from analyses introduces biases. In particular, we find that this has +probably led to an underestimate of the low-velocity slope of the +baryonic Tully-Fisher relation, offering a straightforward explana- +tion for the steeper slope for gas-rich galaxies found by (Papastergis +et al. 2016). +Whether our findings based on the APOSTLE simulations are ap- +plicable to observed galaxies depends on how faithfully the simula- +tions capture the relevant physical processes. Of the main categories +of perturbations that we see operating in the simulations, we would +characterize only one (supernova-driven outflows) as sensitively de- +pendent on modelling choices in which there is significant ambiguity. 
+Other processes like the merger rate or the shapes of DM haloes are +natural consequences of structure formation in a ΛCDM cosmology, +and depend on physics that is well understood and straightforward to +implement in the models. There is only a single galaxy in our sample +that we have flagged as having strong vertical outflows, but not any +other strong perturbations at 𝑧 = 0. Our main conclusion that a ma- +jority of galaxies in the 𝑣max range of our sample have rotation curves +that differ significantly from their circular velocity curves is therefore +probably also applicable to real low-mass galaxies, but confirming +this in other cosmological hydrodynamical galaxy formation models +would reinforce this. Galaxies with non-equilibrium gas kinematics +are therefore likely one of the main drivers of the observed kinematic +diversity (as highlighted by Oman et al. 2015) in dwarfs. +4.2 Reflections on visualisation-driven analysis +Our starting point for all of the analysis presented above was our col- +lection of galaxy evolution visualisations and their circular velocity +and rotation curves at the corresponding times. This allowed us to +build a strong intuition for the perturbations affecting the galaxies in +our sample. The visualisations highlight the diversity and complex- +ity of these low-mass galaxies in a way that cannot be fully captured +by integrated properties (such as those in Figs. 4–7) and provided +important context for our more quantitative analysis. We can identify +several instances where we would probably have reached qualita- +tively different conclusions if the visualisations were not available to +guide our intuition and analysis. +The sheer wealth of information represented by the visualisations +and rotation curves eventually motivated our choice to focus our +analysis on the current time (𝑧 = 0). The gas discs of essentially +every galaxy in our sample have been subject to different perturbative +processes at different times. The time dimension of our data set +remains largely unexplored, offering an interesting avenue for future +work. +The human eye is an exceptionally powerful tool for reducing com- +plex visual information to simple patterns and trends. Visualisation- +driven analysis of cosmological hydrodynamical simulations has, in +our opinion, a largely untapped potential to advance our understand- +ing of a wide variety of physical processes in galaxies. +ACKNOWLEDGEMENTS +We thank I. Santos-Santos, A. Ponomareva, A. Fattahi and J. Navarro +for invaluable comments on an early draft of this work. KAO acknowl- +edges support by the European Research Council (ERC) through +Advanced Investigator grant to C.S. Frenk, DMIDAS (GA 786910), +and by STFC through grant ST/T000244/1. ERD was supported by +a Durham Physics Developing Talent Award to K. A. Oman. This +work used the DiRAC@Durham facility managed by the Institute +for Computational Cosmology on behalf of the STFC DiRAC HPC +Facility (www.dirac.ac.uk). The equipment was funded by BEIS cap- +ital funding via STFC capital grants ST/K00042X/1, ST/P002293/1, +MNRAS 000, 1–15 (2023) + +14 +E. R. Downing & K. A. Oman +ST/R002371/1 and ST/S002502/1, Durham University and STFC +operations grant ST/R000832/1. DiRAC is part of the National +e-Infrastructure. This work has made use of NASA’s Astrophysics +Data System. +DATA AVAILABILITY +The SPARC data are available at https://cdsarc.cds.unistra. +fr/viz-bin/cat/J/AJ/152/157, with supplementary data tabu- +lated in Santos-Santos et al. (2020), table A1. 
Access to the APOS- +TLE simulation data is available on reasonable request to the cor- +responding author. Basic properties of galaxies in our sample are +tabulated in Oman et al. (2019), table A1. +REFERENCES +Ball C. J., Haynes M. P., Jones M. G., Peng B., Durbala A., Koopmann R. A., +Ribaudo J., O’Donoghue A., 2022, arXiv e-prints, p. arXiv:2212.08728 +Benitez-Llambay +A., +2015, +py-sphviewer: +Py-SPHViewer +v1.0.0, +doi:10.5281/zenodo.21703, http://dx.doi.org/10.5281/zenodo. +21703 +Benítez-Llambay A., Frenk C. S., Ludlow A. D., Navarro J. F., 2019, MNRAS, +488, 2387 +Binney J., Tremaine S., 2008, Galactic Dynamics: Second Edition. Princeton +University Press +Blitz L., Rosolowsky E., 2006, ApJ, 650, 933 +Bose S., et al., 2019, MNRAS, 486, 4790 +Bosma A., 1981, AJ, 86, 1825 +Bradford J. D., Geha M. C., van den Bosch F. C., 2016, ApJ, 832, 11 +Brook C. B., Santos-Santos I., Stinson G., 2016, MNRAS, 459, 638 +Brooks A. M., Papastergis E., Christensen C. R., Governato F., Stilp A., Quinn +T. R., Wadsley J., 2017, ApJ, 850, 97 +Bullock J. S., Boylan-Kolchin M., 2017, ARA&A, 55, 343 +Chan T. K., Kereš D., Oñorbe J., Hopkins P. F., Muratov A. L., Faucher- +Giguère C. A., Quataert E., 2015, MNRAS, 454, 2981 +Crain R. A., et al., 2015, MNRAS, 450, 1937 +Creasey P., Sameie O., Sales L. V., Yu H.-B., Vogelsberger M., Zavala J., +2017, MNRAS, 468, 2283 +Dalla Vecchia C., Schaye J., 2012, MNRAS, 426, 140 +Davis M., Efstathiou G., Frenk C. S., White S. D. M., 1985, ApJ, 292, 371 +Desmond H., 2012, arXiv e-prints, p. arXiv:1204.1497 +Di Cintio A., Brook C. B., Macciò A. V., Stinson G. S., Knebe A., Dutton +A. A., Wadsley J., 2014, MNRAS, 437, 415 +Dolag K., Borgani S., Murante G., Springel V., 2009, MNRAS, 399, 497 +Fattahi A., et al., 2016, MNRAS, 457, 844 +Flores R. A., Primack J. R., 1994, ApJ, 427, L1 +Frosst M., Courteau S., Arora N., Stone C., Macciò A. V., Blank M., 2022, +MNRAS, 514, 3510 +Gogate A. R., Verheijen M. A. W., van der Hulst J. M., Jaffé Y. L., 2022, +MNRAS, in press +Haardt F., Madau P., 2001, in Neumann D. M., Tran J. T. V., eds, Clusters +of Galaxies and the High Redshift Universe Observed in X-rays. p. 64 +(arXiv:astro-ph/0106018) +Hall M., Courteau S., Dutton A. A., McDonald M., Zhu Y., 2012, MNRAS, +425, 2741 +Hayashi E., Navarro J. F., 2006, MNRAS, 373, 1117 +Helly J. C., Cole S., Frenk C. S., Baugh C. M., Benson A., Lacey C., 2003, +MNRAS, 338, 903 +Hopkins P. F., 2013, MNRAS, 428, 2840 +Jahn E. D., et al., 2021, arXiv e-prints, p. arXiv:2110.00142 +Kaplinghat M., Ren T., Yu H.-B., 2020, J. Cosmology Astropart. Phys., 2020, +027 +Komatsu E., et al., 2011, ApJS, 192, 18 +Lelli F., 2022, Nature Astronomy, 6, 35 +Lelli F., McGaugh S. S., Schombert J. M., 2016a, AJ, 152, 157 +Lelli F., McGaugh S. S., Schombert J. M., 2016b, ApJ, 816, L14 +Lelli F., McGaugh S. S., Schombert J. M., Desmond H., Katz H., 2019, +MNRAS, 484, 3267 +Mancera Piña P. E., et al., 2019, ApJ, 883, L33 +McGaugh S. S., 2012, AJ, 143, 40 +McGaugh S. S., Schombert J. M., Bothun G. D., de Blok W. J. G., 2000, ApJ, +533, L99 +Moore B., 1994, Nature, 370, 629 +Navarro J. F., Eke V. R., Frenk C. S., 1996a, MNRAS, 283, L72 +Navarro J. F., Frenk C. S., White S. D. M., 1996b, ApJ, 462, 563 +Oman K. A., et al., 2015, MNRAS, 452, 3650 +Oman K. A., Navarro J. F., Sales L. V., Fattahi A., Frenk C. S., Sawala T., +Schaller M., White S. D. M., 2016, MNRAS, 460, 3610 +Oman K. A., Marasco A., Navarro J. F., Frenk C. S., Schaye J., Benítez- +Llambay A., 2019, MNRAS, 482, 821 +Papastergis E., Giovanelli R., Haynes M. 
P., Shankar F., 2015, A&A, 574, +A113 +Papastergis E., Adams E. A. K., van der Hulst J. M., 2016, A&A, 593, A39 +Ponomareva A. A., Verheijen M. A. W., Papastergis E., Bosma A., Peletier +R. F., 2018, MNRAS, 474, 4366 +Pontzen A., Governato F., 2012, MNRAS, 421, 3464 +Pontzen A., Governato F., 2014, Nature, 506, 171 +Power C., Navarro J. F., Jenkins A., Frenk C. S., White S. D. M., Springel V., +Stadel J., Quinn T., 2003, MNRAS, 338, 14 +Rahmati A., Pawlik A. H., Raičević M., Schaye J., 2013, MNRAS, 430, 2427 +Read J. I., Gilmore G., 2005, MNRAS, 356, 107 +Read J. I., Iorio G., Agertz O., Fraternali F., 2016, MNRAS, 462, 3628 +Ren T., Kwa A., Kaplinghat M., Yu H.-B., 2019, Physical Review X, 9, +031020 +Roper F. A., Oman K. A., Frenk C. S., Benítez-Llambay A., Navarro J. F., +Santos-Santos I. M. E., 2022, arXiv e-prints, p. arXiv:2203.16652 +Rubin V. C., Ford W. K. J., Thonnard N., 1980, ApJ, 238, 471 +Sales L. V., et al., 2017, MNRAS, 464, 2419 +Sales L. V., Wetzel A., Fattahi A., 2022, Nature Astronomy, 6, 897 +Santos-Santos I. M. E., et al., 2020, MNRAS, 495, 58 +Sawala T., et al., 2016, MNRAS, 457, 1931 +Schaye J., 2004, ApJ, 609, 667 +Schaye J., Dalla Vecchia C., 2008, MNRAS, 383, 1210 +Schaye J., et al., 2015, MNRAS, 446, 521 +Sellwood J. A., Spekkens K., Eckel C. S., 2021, MNRAS, 502, 3843 +Sorce J. G., Guo Q., 2016, MNRAS, 458, 2667 +Spergel D. N., Steinhardt P. J., 2000, Phys. Rev. Lett., 84, 3760 +Springel V., White S. D. M., Tormen G., Kauffmann G., 2001, MNRAS, 328, +726 +Tollet E., et al., 2016, MNRAS, 456, 3542 +Trujillo-Gomez S., Klypin A., Primack J., Romanowsky A. J., 2011, ApJ, +742, 16 +Tulin S., Yu H.-B., 2018, Phys. Rep., 730, 1 +de Blok W. J. G., 2010, Advances in Astronomy, 2010, 789293 +Valenzuela O., Rhee G., Klypin A., Governato F., Stinson G., Quinn T., +Wadsley J., 2007, ApJ, 657, 773 +Verbeke R., Papastergis E., Ponomareva A. A., Rathi S., de Rijcke S., 2017, +A&A, 607, A13 +Wiersma R. P. C., Schaye J., Smith B. D., 2009a, MNRAS, 393, 99 +Wiersma R. P. C., Schaye J., Theuns T., Dalla Vecchia C., Tornatore L., 2009b, +MNRAS, 399, 574 +Wingfield McQuinn K. B., et al., 2022, arXiv e-prints, p. arXiv:2203.10105 +Zaritsky D., et al., 2014, AJ, 147, 134 +APPENDIX A: GALAXY VIDEOS, CIRCULAR VELOCITY +AND ROTATION CURVES +We include as supplementary material a collection of mp4 video files +for each galaxy in our sample showing different views of their evo- +lution over the past 4 Gyr ({AP-ID} is substituted with the identifier +of each galaxy, such as AP-L1-V6-5-0): +MNRAS 000, 1–15 (2023) + +Rotation curves of low-mass galaxies +15 +• {AP-ID}-composite-edge-and-face.mp4 +Side-by-side +views of the galaxy seen face-on and edge-on, with a composite +image of the projected DM density (grey scale) and gas density +(purple-yellow colour). +• {AP-ID}-gas-edge-and-face.mp4 Side-by-side views of +the galaxy seen face-on and edge-on, showing the projected gas +density. +• {AP-ID}-face-gas-and-dm.mp4 Side-by-side views of the +galaxy seen face-on, in projected DM density (left) and gas density +(right). +• {AP-ID}-edge-gas-and-dm.mp4 Side-by-side views of the +galaxy seen edge-on, in projected DM density (left) and gas density +(right). +Details of the creation of these visualisations is given in Sec. 2.3. We +note that in some cases the orientation of the camera is arbitrary in +the initial frames of the videos – this is due to the angular momentum +of the gas disc not being evaluated until the first snapshot (8.94 Gyr) +after the start time (8.88 Gyr). 
In addition, we include a file {AP-ID}-rotation-curves.pdf with a page showing, at the time of each simulation snapshot:

• A plot showing the circular velocity curve (purple) and the median azimuthal velocity of atomic gas particles (orange) at the labelled time of the snapshot, measured as described in Sec. 2.2.
• The face-on (left) and edge-on (right) gas density images of the galaxy, i.e. matching those in {AP-ID}-gas-edge-and-face.mp4.

Examples for a single galaxy are available on arXiv as ancillary files and can also be found at: http://icc.dur.ac.uk/~txwx36/share/DowningOman2023_supplementary/AP-L1-V6-5-0/
The complete collection of supplementary material can be downloaded from http://icc.dur.ac.uk/~txwx36/share/DowningOman2023_supplementary.tar.gz.

This paper has been typeset from a TEX/LATEX file prepared by the author.

[Figure 9 appears here: three stacked panels showing 𝑀bar = 𝑀⋆ + 1.33𝑀HI [M⊙] against 𝑣max (upper) and 𝑣flat (centre), and 𝑣flat/𝑣max against 𝑣max (lower); overplotted are the SPARC data (Lelli et al. 2016a), the Lelli et al. (2016b) fit, and fits to the 𝑣max and 𝑣flat points; symbols distinguish galaxies not currently interacting from those with an ongoing interaction with a companion, and rotation curve classes 1–4.]

Figure 9. The baryonic Tully-Fisher relation (BTFR). Upper panel: Baryonic mass against maximum circular velocity, 𝑣max. Point colours and open/filled symbols are as in Fig. 8. The solid black line is a linear fit to the open symbols (see Sec. 3.3.1 for details). The BTFR fit from Lelli et al. (2016b) is plotted with a thin solid grey line, and data from the SPARC compilation (Lelli et al. 2016a) as small grey points. The areas outside of our selection 60 < 𝑣max/km s−1 < 120 are shaded in light grey. Centre panel: As upper panel, but with the measured maximum gas rotation velocity, 𝑣flat, on the horizontal axis (see Sec. 3.3.1 for details of how 𝑣flat is measured). The points are joined to their respective locations in the upper panel by a horizontal line. The fit line from the upper panel is repeated, and a fit to the filled points in this panel is shown with a dashed black line. Lower panel: The ratio 𝑣flat/𝑣max, plotted against 𝑣max. The maximum gas rotation velocity systematically underestimates the maximum circular velocity (𝑣flat/𝑣max < 1), and the underestimate gets systematically worse towards lower 𝑣max; this has the potential to bias both the normalisation and the slope of the BTFR.
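To make the kind of fit quoted in this caption concrete, the following minimal Python sketch fits a single power law 𝑀bar ∝ 𝑣^𝛼 as a straight line in log–log space. It is not the paper's fitting code (the details of which are in Sec. 3.3.1), and the arrays mbar, vmax and vflat are hypothetical placeholders for the sample values.

import numpy as np

def btfr_fit(mbar, v):
    # Straight-line fit in log-log space: log10(Mbar) = alpha * log10(v) + b,
    # i.e. Mbar = 10**b * v**alpha.
    alpha, b = np.polyfit(np.log10(v), np.log10(mbar), deg=1)
    return alpha, 10.0 ** b

# mbar [Msun], vmax and vflat [km/s] are hypothetical arrays for the sample.
alpha_max, norm_max = btfr_fit(mbar, vmax)
alpha_flat, norm_flat = btfr_fit(mbar, vflat)
print(f"Mbar ~ v^{alpha_max:.2f} (using vmax); Mbar ~ v^{alpha_flat:.2f} (using vflat)")

Because 𝑣flat systematically underestimates 𝑣max at the low-velocity end (lower panel), the slope and normalisation recovered from the two fits will in general differ.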
[Figure 10 appears here: four stacked panels showing 𝑣fid,circ against 𝑣max (upper) and 𝑣fid,rot against 𝑣flat (second), with the 1:1 line, the NFW relation (𝑣fid ∼ 0.65𝑣max) and the Santos-Santos et al. (2020) compilation overplotted, followed by the ratios 𝑣flat/𝑣max and 𝑣fid,rot/𝑣fid,circ against 𝑣max; symbols distinguish galaxies not currently interacting from those with an ongoing interaction with a companion, and rotation curve classes 1–4.]

Figure 10. Upper panel: The circular velocity measured at an inner, 'fiducial radius' 𝑟fid (see Sec. 3.3.2) is plotted against the maximum circular velocity, 𝑣max. Lower central densities correspond to lower 𝑣fid,circ at fixed 𝑣max. The relation for an NFW density profile (Santos-Santos et al. 2020) is shown with a grey solid line. Data from the compilation of Santos-Santos et al. (2020) are plotted with small grey points. Point colours and open/filled symbols are as in Fig. 8. The areas outside of our selection 60 < 𝑣max/km s−1 < 120 are shaded in light grey. Second panel: As upper panel, but the inner and outer rotation velocities 𝑣fid,rot and 𝑣flat are measured from the 𝑧 = 0 rotation curve. Points are joined by a line to their positions in the upper panel. Third panel: Ratio of the outer rotation velocity measured from the rotation curve to that measured from the circular velocity curve, 𝑣flat/𝑣max, plotted against 𝑣max. Point colours and open/filled symbols as in upper panels. Lower panel: Ratio of the inner rotation velocity measured from the rotation curve to that measured from the circular velocity curve, 𝑣fid,rot/𝑣fid,circ, plotted against 𝑣max. Point colours and open/filled symbols as in upper panels.
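For reference, the NFW line in the upper panel can be reproduced from the NFW circular velocity profile. The sketch below is illustrative only: it assumes a fiducial radius scaling of 𝑟fid = 2 kpc × (𝑣max/100 km s−1), following the convention of Santos-Santos et al. (2020) rather than quoting the definition actually used in Sec. 3.3.2, and it takes an example value of 𝑟max chosen by hand instead of from a mass–concentration relation.

import numpy as np

X_PEAK = 2.163  # r_max / r_s, the radius of the NFW circular-velocity peak

def nfw_mu(x):
    # Dimensionless NFW enclosed-mass profile: mu(x) = ln(1 + x) - x / (1 + x).
    return np.log(1.0 + x) - x / (1.0 + x)

def vcirc_over_vmax_nfw(r, r_max):
    # v_circ(r) / v_max for an NFW halo whose circular velocity peaks at r_max.
    x = r * X_PEAK / r_max  # r in units of the scale radius r_s
    return np.sqrt((r_max / r) * nfw_mu(x) / nfw_mu(X_PEAK))

v_max = 80.0                 # km/s, within the sample's selection range
r_max = 15.0                 # kpc, assumed here purely for illustration
r_fid = 2.0 * v_max / 100.0  # kpc, assumed fiducial-radius scaling
print(vcirc_over_vmax_nfw(r_fid, r_max))  # ~0.64 with these assumed numbers

With these assumed parameters the ratio comes out close to the ∼0.65 level annotated on the figure; the exact value depends on how 𝑟max is related to 𝑣max for haloes of this mass.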
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We use our visualisations to guide an assessment of how many galaxies are likely to be strongly perturbed by processes in several categories: mergers/interactions (affecting 6/33 galaxies), bulk radial gas inflows (19/33), vertical gas outflows (15/33), distortions driven by a non-spherical DM halo (17/33), warps (8/33), and winds due to motion through the IGM (5/33).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Most galaxies fall into more than one of these categories;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' only 5/33 are not in any of them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The sum of these effects leads to an underestimation of the low-velocity slope of the baryonic Tully-Fisher relation (𝛼 ∼ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1 instead of 𝛼 ∼ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='9, where 𝑀bar ∝ 𝑣𝛼) that is difficult to avoid, and could plausibly be the source of a significant portion of the observed diversity in low-mass galaxy rotation curve shapes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Key words: galaxies: kinematics and dynamics – galaxies: dwarf – dark matter 1 INTRODUCTION Since the discovery of flat rotation curves in galaxies (Rubin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 1980;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Bosma 1981) leading to the widespread acceptance of dark matter (DM) theories, rotation curves have been used to study DM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Low-mass galaxies, with maximum circular velocities ≲ 120 km s−1, are particularly well suited for such analysis because their high DM mass fractions reduce the relative gravitational influence of baryons, so that their circular velocity almost directly traces their DM content.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The baryonic Tully-Fisher relation (BTFR;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' McGaugh et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2000) provides a concise summary of this trend: the baryonic (gas plus stellar) mass of galaxies is observed to be proportional to about the fourth power of their maximum rotation velocities, 𝑀bar ∝ 𝑣4max (but see Ponomareva et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2018), but a constant baryon-to-DM mass ratio would instead imply a shallower slope close to 𝑀bar ∝ 𝑣3max (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Sales et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The slope and scatter of the BTFR for the lowest mass galaxies (𝑀bar ≲ 109 M⊙), however, remain challenging to constrain (Sorce & Guo 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Papastergis et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Bradford et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Verbeke et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Ponomareva et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2018;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Mancera Piña et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Lelli et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Wingfield McQuinn et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Ball et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2022, and Lelli 2022, a review), and leaves the connection between the luminous components of galaxies and the DM haloes in which they form at the low-mass edge of galaxy formation uncertain (Trujillo-Gomez et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2011;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Desmond 2012;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Papastergis et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2015;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' ★ E-mail: eleanor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='downing@durham.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='uk † E-mail: kyle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='oman@durham.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='uk Brook et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Oman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Brooks et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Sales et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Studies of dwarf galaxies have revealed several potential problems in near-field cosmology (see Bullock & Boylan-Kolchin 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Sales et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2022, for reviews).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' One such problem that remains unresolved is the ‘cusp-core’ problem (Flores & Primack 1994;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Moore 1994;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' de Blok 2010);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' the inner slopes of low-mass galaxy rotation curves are often slowly rising compared to the mass profile implied by the steep central density ‘cusps’ predicted by N-body simulations (Navarro et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 1996b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' There have been many proposed resolutions of the cusp-core prob- lem within the ΛCDM framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' One such proposal is that gas flows driven by supernova feedback couple gravitationally to the DM and re-distribute it, producing and maintaining a central density ‘core’ (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Navarro et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 1996a;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Read & Gilmore 2005;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Pontzen & Gov- ernato 2012, and see Pontzen & Governato 2014, for a review).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The ‘bursty’ star formation histories arising in some galaxy formation simulations produce cores in a limited mass range (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Di Cintio et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2014;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Chan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2015;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Tollet et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Jahn et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2021), and the conditions necessary for core formation via this mechanism are now well-understood (Bose et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Benítez-Llambay et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' However, whether such effects can fully reproduce the diverse rotation curves observed for dwarf galaxies remains unclear (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Oman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2015;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Santos-Santos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Roper et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Another proposed scenario involves allowing cold DM particles to scatter from each other, leading to heat transfer to the inner regions of DM haloes and redistributing the DM to produce a core (Spergel & Steinhardt 2000).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Such ‘self-interacting dark matter’ (SIDM) models © 2023 The Authors arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='05242v1 [astro-ph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='GA] 12 Jan 2023 2 E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Downing & K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Oman inherit the large scale successes of the standard ΛCDM model, and are able to produce a range of rotation curve shapes by including the gravitational influence of baryons, which can re-form a cusp (see Tulin & Yu 2018, for a recent review).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' This shows promise (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Ren et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Kaplinghat et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2020) however, again, concerns whether SIDM can account for the full observed diversity remain (Creasey et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Santos-Santos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' More prosaically, the problem could be that the circular veloc- ity curves of low-mass galaxies are not accurately measured by the methods used to extract them from observations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The inclination angle (possibly varying with radius), non-circular motions, poten- tially anisotropic velocity dispersion, and geometrically thick and/or flared nature of gas discs are just some of the challenging issues that models in principle need to account for to accurately measure a rota- tion curve.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Strong degeneracies between parameters describing the geometry and kinematics of a gas disc further complicate matters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Attempts to model realistic galaxies with known rotation curves have revealed that the errors due to these issues can be quite severe (Read et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Oman et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Roper et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2022), although Frosst et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2022) argue that such effects may still fall well short of explaining the observed diversity in rotation curve shapes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' There is, however, an even more worrying possibility: that the rotation curves of low-mass galaxies may in some cases not faithfully trace their circular velocity curves1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In this case even a perfectly accurate measurement of the rotation curve gives no meaningful information about the total matter distribution within a galaxy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' It is clear that some low-mass galaxies are not in dynamical equi- librium, and thus that their rotation curves are not reliable tracers of their circular velocity curves (and consequently of their DM content).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Obvious perturbations, such as mergers or star formation-driven ‘su- perbubbles’, are easily identified, however low-mass galaxies’ shal- low gravitational potential wells make them especially susceptible to additional perturbations which may not be so obvious.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' How of- ten these more subtle physical processes may cause departures from equilibrium in these objects remains almost unexplored in the liter- ature (see Hayashi & Navarro 2006 on the effect of a triaxial DM halo;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Valenzuela et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2007 on the influence of lopsided gas discs;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Read et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016 on the influence of the star formation cycle;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Verbeke et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2017 sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1 for a brief exploration of the topic).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In this work we make an initial assessment of the relative impor- tance of different types of perturbations using a sample of galaxies with maximum circular velocities 60 < 𝑣max/km s−1 < 120 from the APOSTLE suite of cosmological hydrodynamical simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We create visualisations of the galaxies and compute their rotation and circular velocity curves at a range of times over the past ∼ 4 Gyr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We use these to investigate the kind of perturbations that affect low- mass galaxies, their frequencies, their effects on the galaxies’ rotation curves, and what conditions are necessary for galaxies to actually ro- tate at their circular speeds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We begin in Section 2 with a brief description of the APOSTLE simulations and our methods for calculating rotation curves and pro- ducing visualisations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In Section 3, we present our main results: we describe the perturbations affecting galaxies in our sample, and inves- tigate their influence on key galaxy scaling relations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We summarize our conclusions and discuss their implications and applicability to real galaxies in Section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 1 Throughout this work, we use ‘circular velocity curve’ to refer to the speed of a particle on a circular orbit computed for a given density field, and ‘rotation curve’ to refer to the orbital speed of gas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2 METHODS 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1 The APOSTLE simulations The APOSTLE2 simulations (Sawala et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Fattahi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016) are a suite of zoom-in cosmological hydrodynamical galaxy forma- tion simulations.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The suite is made up of 12 regions selected to resemble the Local Group of galaxies in terms of the masses, separa- tion and kinematics of a pair of galaxies analogous to the Milky Way and Andromeda, and a lack of other massive galaxies within a few megaparsecs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' A region about 2 − 3 Mpc in radius around each pair was simulated at multiple resolution levels (lowest ‘L3’ to highest ‘L1’) with the ‘Reference’ calibration (Crain et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2015) of the EA- GLE galaxy formation model (Schaye et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The model is implemented using a smoothed-particle hydrodynamics framework in the pressure-entropy formulation (Hopkins 2013) and includes prescriptions for radiative cooling (Wiersma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2009a), star for- mation (Schaye 2004;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Schaye & Dalla Vecchia 2008), stellar and chemical enrichment (Wiersma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2009b), thermal-mode stellar feedback (Dalla Vecchia & Schaye 2012) and cosmic reionisation (Haardt & Madau 2001;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Wiersma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2009b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The feedback from supermassive black hole accretion implemented in the EAGLE model has a negligible effect on the galaxies in the APOSTLE simulations (Sawala et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The simulations assume the WMAP-7 cosmo- logical parameters (Komatsu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2011).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Galaxies are identified in the simulations following a two-step process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' First, particles are linked together by a friends-of-friends (FoF) algorithm (Davis et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 1985).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Each FoF group is indepen- dently analysed using the Subfind halo finding algorithm (Springel et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2001;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Dolag et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2009) which identifies gravitationally bound substructures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The subhalo with the minimum gravitational potential in each FoF group is labelled the ‘central’ galaxy of the group, while others are labelled ‘satellites’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We label galaxies from the APOSTLE simulations following the same convention as Oman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2019): for example, AP-L1-V6-5-0 refers to APOSTLE resolution level L1, region (volume) V6, FoF group 5, subhalo 0 (the ‘central’ subhalo).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We always refer to the identifier of the galaxy in the last snapshot;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' its progenitor(s) may have different identifiers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We track the progen- itors of galaxies in our sample back through time using the merger tree algorithm of Helly et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2003).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' When a galaxy has more than one progenitor at a previous time, we follow the progenitor that con- tributed the most particles to the descendant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In this work we focus on a sample drawn exclusively from the highest-resolution (L1) simulations in the suite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Only regions V1, V4, V6, V10 and V11 have been simulated at this resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' At L1 resolution, the gas (dark matter) particle mass is typically 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='4 × 103 M⊙ (3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='6 × 104 M⊙), and the maximum gravitational softening length is ≈ 134 pc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' According to the criterion of Power et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2003), the circular velocity curves of low-mass galaxies at this resolution level are numerically converged to better than 10 per cent at radii ≳ 700 pc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We focus on recent times, between 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='9 Gyr and 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='76 Gyr (𝑧 = 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In this period there are 17 full simulation outputs (every ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='3 Gyr), or ‘snapshots’, and 147 partial outputs (every 34 Myr), or ‘snipshots’, where some detail – such as abundances of individual elements – is omitted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Our sample of galaxies is the same as that used by Oman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The galaxies are selected to have maximum circular velocities 60 < 𝑣max/km s−1 < 120, to be centrals (not satellites), and to be found in FoF groups which do not include any contaminating low- 2 A Project Of Simulating The Local Environment MNRAS 000, 1–15 (2023) Rotation curves of low-mass galaxies 3 0 5 10 R [kpc] 0 20 40 60 80 100 120 vcirc or vrot [km s−1] 9 10 11 12 13 t [Gyr] Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The rotation curves of the galaxy AP-L1-V6-5-0 at times between 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='88 Gyr and 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='76 Gyr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The circular velocity curve increases gradually over time within the purple band.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The extracted rotation curves are much more variable and are plotted with coloured curves, with yellow for earlier and red for later times.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The largest fluctuation in the rotation curves coincides with the time of a merger with a gas-rich companion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' resolution particles from outside the nominal zoom-in regions of the simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' There are 33 such galaxies, with 4 found in simulation region V1, 5 in V4, 11 in V6, 10 in V10, and 3 in V11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' All are at separated from the nearest of the pair of galaxies analogous to the Milky Way and M 31 by at least 450 kpc, and up to3 4 Mpc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='2 Circular velocity and rotation curves We calculated the total circular velocity curves of galaxies in our sample as 𝑣circ = √︁ (𝐺𝑀(< 𝑟))/𝑟, where 𝐺 is the gravitational con- stant and 𝑀(< 𝑟) is the mass enclosed within radius 𝑟 of the location of the particle with the minimum gravitational potential, including all particle types (DM, gas, stars, and black holes).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The spherically symmetric approximation is reasonable for our sample of galaxies, which are invariably DM-dominated both globally and locally at all radii.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Furthermore, as will be seen below, the actual rotation curves preferentially underestimate the (spherically averaged) circular ve- locity curves, so the reduction in 𝑣circ by a few per cent (Binney & Tremaine 2008, sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1b) due to this approximation tends to slightly underestimate differences between the two.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Before calculating rotation curves, we set the velocity zero point of each galaxy to the mean velocity of its 100 innermost ‘atomic’ gas particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We define atomic gas particles as those with H i mass fractions of greater than 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The H i mass fractions are calculated as detailed in Oman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2019) – in brief, these assume the em- pirical prescription of Rahmati et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2013) to compute the neutral fractions of particles, and the relation given in Blitz & Rosolowsky (2006) to partition atomic from molecular gas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We then calculate the angular momentum vector of the atomic gas disc by summing the angular momenta of the innermost 50 per cent of atomic gas particles (or 125,000, whichever is fewer).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We rotate the coordinate 3 The ‘zoom-in’ region has an irregular shape and can extend beyond the nominal radius of 2−3 Mpc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The condition that no low-resolution particles are present in the FoF group ensures that the galaxies in our sample are sufficiently far from the boundary of the ‘zoom-in’ region to avoid any spurious numerical effects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' frame so that the angular momentum vector points in the 𝑧-direction, placing the disc in the 𝑥-𝑦 plane.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We measure the median azimuthal velocity of atomic gas particles gravitationally bound to the galaxy within cylindrical annuli of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='5 kpc width.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' This bin width offers a good compromise between limiting noise in the measurement and resolving the structure in the rotation curves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We measure the rota- tion curves out to the edge of the atomic gas disc, which we define as the radius enclosing 90 per cent of the H i mass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The rotation curves are not corrected for a possible radial pressure gradient in the gas disc (often incorrectly termed an ‘asymmetric drift correction’, see e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Valenzuela et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2007, appendix A).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Such corrections for our sample of galaxies (at 𝑧 = 0) were computed by Oman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2019) and are invariably small (≲ 10 per cent), except for during mergers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Since we focus below on links between visible (Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='3) gas kinematic features and rotation curve features we omit further discussion of pressure-support corrections for simplicity, but note that we do not expect that accounting for these would qualita- tively change any of our conclusions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' This process was repeated for the 17 snapshots between 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='88 Gyr and 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='76 Gyr to produce a set of circular velocity and rotation curves over time, for each of the 33 galaxies in our sample.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 1 shows the resulting curves at each snapshot for the galaxy AP-L1-V6-5-0, as an illustrative example.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='3 Images and videos We use the py-sphviewer (Benitez-Llambay 2015) toolkit to create videos of galaxies in our sample over time to explore the kinds of perturbations that affect them and their effects on their rotation curves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In py-sphviewer, the ‘observer’ is referred to as the ‘camera’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The parameters specifying the camera position and orientation are ‘anchored’ at the times corresponding to snapshots.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The camera is pointed at the centre of the galaxy of interest (defined as the location of the particle with the minimum gravitational potential), and placed at a distance such that an image with a 90◦ field of view extends to about twice the radius of the atomic gas disc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We track both a ‘face- on’ view camera offset from the centre along the angular momentum vector of the disc (see Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='2), and an ‘edge-on’ view camera offset along an arbitrarily chosen orthogonal axis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Each galaxy is visualised at 383 times evenly spaced between 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='88 and 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='76 Gyr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Since the time when a visualisation is to be created does not in general correspond to the time of a snapshot or snipshot, particle positions are linearly interpolated between the two simulation outputs closest to the desired time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The parameters describing the camera position and orientation are also linearly interpolated to the desired time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The close spacing of the snipshots in time means that a higher-order interpolation scheme is not necessary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Finally, the normalisation of the colour scale of the images is linearly interpolated between the maximum pixel values in the first and last image (and likewise for the minima) in each series to prevent ‘flickering’ and over/under- saturation of the images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' To focus attention on the object of interest, the contributions of simulation particles more than 50 kpc from the centre of the object of interest is exponentially suppressed with a scale length of 50 kpc, such that anything beyond ∼ 300 kpc is essentially invisible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We use this procedure to create videos visualising the galaxy face- on and edge-on for DM and gas particle types, and assemble these in a variety of combinations (e.' 
We use this procedure to create videos visualising the galaxy face-on and edge-on for DM and gas particle types, and assemble these in a variety of combinations (e.g. face-on and edge-on with composite DM plus gas images; face-on showing DM and gas particles side by side; edge-on showing DM and gas particles side by side) to create an information-rich set of videos for each galaxy.

Figure 2. Selected frames from the face-on video of the galaxy AP-L1-V6-5-0, showing a gas-rich merger which strongly disrupts the gas disc and the rotation curve around t ≈ 10.5 ± 0.5 Gyr (shown in Figure 1). The partially stripped, but still gas-rich, secondary halo has a second approach, once again disturbing the galaxy, around t ≈ 11.7 ± 0.1 Gyr. The first two images show the DM density only (grey-scale) and gas density only (purple-orange colour map) respectively, for the same time as shown in the third panel. The further images are composites of DM density and gas density. On compatible pdf viewer software a video will play before the figure is displayed. It shows the evolution of the galaxy over ∼ 4 Gyr with side-by-side face-on (left) and edge-on (right) views of the galaxy showing the DM and gas density composite visualisation.
The same video is available in the supplementary materials as AP-L1-V6-5-0-composite-edge-and-face.mp4 (see Appendix A).

We also produce a set of figures for each galaxy showing its circular velocity and rotation curve at the time of each snapshot side-by-side with an image of the galaxy at the same time. Further details are given in the Appendix. Fig. 2 shows a few example frames from a DM-plus-gas composite face-on view video (on compatible software the video itself will be shown before the figure is displayed) for the galaxy AP-L1-V6-5-0 (the same galaxy as in Fig. 1), showing a gas-rich merger.

3 RESULTS

We examined the videos and rotation curves for each galaxy in detail. We noted the characteristics of each galaxy, the types of perturbations visibly affecting each, and their effects on the galaxy and its rotation curve over time. We allowed the qualitative impressions formed during this process to guide the creation of a quantitative summary of the different types of perturbations affecting the galaxies. Our thoughts regarding the advantages of this approach are summarised in Sec. 4.2 below.

3.1 Quality of the rotation curve as a circular velocity tracer

In Fig. 3, we show the rotation curves at the times of the last 3 snapshots
(13.10, 13.43 and 13.76 Gyr) of each galaxy, as well as their circular velocity curves at the time of the last snapshot. The circular velocity curve (purple line) at the times of the two preceding snapshots is invariably very similar to that at the time of the last snapshot, so we omit them from the figure. Some rotation curves accurately trace the circular velocity curve, while others do not. Likewise, some galaxies have rotation curves that are highly variable over the ∼ 660 Myr spanned by the three snapshots, while others are quite stable. Guided by our visual impression of the curves in Fig. 3, we devised a summary statistic Q that captures these features. It is defined:

    Q = \frac{1}{7} \left( 4 q_0 + 2 q_{0,1} + q_{1,2} \right),   (1)

where:

    q_0     = P_{0.75}\left( \left| \frac{v_{\mathrm{rot},0}(R_i)}{v_{\mathrm{circ},0}(R_i)} - 1 \right| \right),   (2)
    q_{0,1} = P_{0.75}\left( \left| \frac{v_{\mathrm{rot},0}(R_i)}{v_{\mathrm{rot},1}(R_i)} - 1 \right| \right),   (3)
    q_{1,2} = P_{0.75}\left( \left| \frac{v_{\mathrm{rot},1}(R_i)}{v_{\mathrm{rot},2}(R_i)} - 1 \right| \right).   (4)

v_circ,0 is the circular velocity curve at the time of the last snapshot; v_rot,0, v_rot,1 and v_rot,2 are the rotation curves at the times of the last, second-last and third-last snapshots, respectively; R_i are the radii where the curves are sampled; and P_0.75(·) denotes the 75th percentile. The radii R_i are evenly spaced every 500 pc out to the radius enclosing 90 per cent of the H i mass of the galaxy – since this varies with time, pairs of curves are compared using sampling points common to the pair.
Conceptually, q_0 measures how well the rotation curve traces the circular velocity curve (at the time of the final snapshot), while q_{0,1} and q_{1,2} measure the time variability of the rotation curve. In all cases, smaller values indicate better agreement. The 75th percentile is used to enforce that 'agreement' between two curves must extend over most of the curves (3/4 of their extent) to obtain a correspondingly small value. The three q values are combined as a weighted sum to give Q, with slightly more weight placed on the agreement between the rotation curve and the circular velocity curve than on its time variability. The panels of Fig. 3 are arranged in order of increasing Q. It is visually clear that the rotation curves of galaxies with higher Q do not trace the circular velocity curve as closely as those of galaxies with lower Q, and are likewise more time-variable. We divide galaxies into 4 classes based on the Q statistic. Of the 33 galaxies in our sample, 3 are labelled 'class 1' ('excellent' agreement between circular velocity and rotation curves; Q < 0.125), 9 'class 2' ('good' agreement; 0.125 ≤ Q < 0.175), 7 'class 3' ('fair' agreement; 0.175 ≤ Q < 0.225) and 14 'class 4' ('poor' agreement; Q ≥ 0.225).
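As a concrete illustration of equations (1)–(4) and the class boundaries quoted above, the short Python sketch below computes Q and the corresponding class. It assumes the rotation and circular velocity curves have already been resampled onto radii common to each compared pair; the function names are ours and are not taken from any published analysis code.

import numpy as np

def q_ratio(v_a, v_b):
    # 75th percentile of |v_a / v_b - 1| over the radii common to both curves.
    return np.percentile(np.abs(np.asarray(v_a) / np.asarray(v_b) - 1.0), 75)

def q_statistic(v_circ0, v_rot0, v_rot1, v_rot2):
    """Q = (4 q0 + 2 q01 + q12) / 7, as in equations (1)-(4); each input is a
    velocity curve sampled every 500 pc on radii common to the compared pair."""
    q0 = q_ratio(v_rot0, v_circ0)   # agreement with the circular velocity curve
    q01 = q_ratio(v_rot0, v_rot1)   # variability: last vs. second-last snapshot
    q12 = q_ratio(v_rot1, v_rot2)   # variability: second- vs. third-last snapshot
    return (4.0 * q0 + 2.0 * q01 + q12) / 7.0

def q_class(Q):
    # Class boundaries quoted in the text: 0.125, 0.175 and 0.225.
    return 1 + int(np.digitize(Q, [0.125, 0.175, 0.225]))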
Table 1. Summary of perturbations affecting the gas kinematics in our sample of galaxies. The rows are in order of increasing Q parameter (column 3) (Sec. 3.1), quantifying how closely the rotation curve traces the circular velocity curve; higher Q indicates poorer agreement. The range in Q is separated into 4 classes (column 1), from class 1, 'excellent agreement', to class 4, 'poor agreement'. The remaining columns provide quantitative estimates of the strength of various perturbations, with entries corresponding to a nominal 'strong perturbation' regime shown in bold. Further details are given in the specified sections. Column (4): Time(s) since the big bang of the first pericentric passage of companions with DM mass ratio greater than 1:20. Currently strongly interacting companions are marked ‡, and the entire entry is shown in bold (Sec. 3.2.1). (5): Peak (most negative) bulk cylindrical radial atomic gas inflow rate during the last ∼ 600 Myr, values < −5 kpc Gyr−1 in bold (Sec. 3.2.2). (6): Peak bulk vertical (sgn(z) v_z) atomic gas expansion rate during the last ∼ 600 Myr, values > 1 kpc Gyr−1 in bold (Sec. 3.2.2). (7): DM halo major-to-intermediate axis ratio b/a at z = 0 within an aperture with radius equal to twice the radius enclosing 90 per cent of the H i mass, values < 0.95 in bold (Sec. 3.2.3). (8): Angle between the angular momentum vectors of the inner and outer H i disc at z = 0, values > 30° in bold (Sec. 3.2.4). (9): Speed of the galaxy with respect to diffuse gas between 1 and 2 times r200 at z = 0, speeds > 50 km s−1 in bold (Sec. 3.2.5).

Class | Galaxy ID | Q | First pericentre of merger or interaction (Gyr) | Peak radial bulk flow (kpc Gyr−1) | Peak vertical bulk flow (kpc Gyr−1) | DM halo b/a | Warp angle θwarp | IGM wind speed vwind (km s−1)
1 | AP-L1-V11-3-0  | 0.04 | 8.4                          | −3.9  | −0.1 | 0.99 | 8°  | 26
1 | AP-L1-V1-4-0   | 0.09 | –                            | −3.3  | −0.0 | 0.99 | 8°  | 22
1 | AP-L1-V4-8-0   | 0.11 | –                            | −3.6  | 0.4  | 0.96 | 9°  | 17
2 | AP-L1-V6-12-0  | 0.13 | –                            | −6.2  | 0.2  | 0.98 | 6°  | 31
2 | AP-L1-V6-8-0   | 0.14 | 10.9                         | −2.7  | 1.8  | 0.99 | 3°  | 82
2 | AP-L1-V1-8-0   | 0.14 | –                            | −5.0  | 0.7  | 0.97 | 7°  | 15
2 | AP-L1-V6-5-0   | 0.15 | 10.6                         | −5.4  | 0.1  | 0.99 | 8°  | 43
2 | AP-L1-V10-6-0  | 0.16 | –                            | −7.4  | −1.6 | 0.91 | 44° | 27
2 | AP-L1-V6-19-0  | 0.16 | 8.9‡                         | −0.9  | 0.0  | 0.97 | 13° | 19
2 | AP-L1-V11-6-0  | 0.17 | –                            | −6.1  | 1.8  | 0.88 | 5°  | 32
2 | AP-L1-V10-14-0 | 0.17 | 8.9, 10.3                    | −5.2  | 1.6  | 0.96 | 6°  | 64
2 | AP-L1-V4-10-0  | 0.17 | –                            | −2.0  | 1.3  | 0.97 | 3°  | 25
3 | AP-L1-V4-6-0   | 0.18 | –                            | −4.0  | 1.0  | 0.96 | 49° | 45
3 | AP-L1-V11-5-0  | 0.18 | 9.8, 10.3‡, 11.2             | −5.0  | 3.4  | 0.91 | 12° | 66
3 | AP-L1-V1-7-0   | 0.18 | –                            | −4.6  | 1.2  | 0.95 | 41° | 23
3 | AP-L1-V4-14-0  | 0.18 | –                            | −5.9  | 0.1  | 0.98 | 4°  | 30
3 | AP-L1-V6-7-0   | 0.20 | 8.9, 11.5, 12.8‡             | −12.8 | −0.2 | 0.87 | 51° | 60
3 | AP-L1-V10-30-0 | 0.20 | 9.5                          | −4.3  | 0.5  | 0.98 | 4°  | 26
3 | AP-L1-V6-16-0  | 0.22 | 10.3                         | −4.7  | 0.7  | 0.96 | 17° | 10
4 | AP-L1-V6-20-0  | 0.23 | –                            | −10.4 | 1.3  | 0.84 | 14° | 50
4 | AP-L1-V6-18-0  | 0.24 | –                            | −8.6  | 0.2  | 0.90 | 9°  | 24
4 | AP-L1-V10-19-0 | 0.26 | 8.4, 8.9                     | −6.2  | 1.9  | 0.93 | 5°  | 8
4 | AP-L1-V4-13-0  | 0.26 | –                            | −3.5  | 2.0  | 0.87 | 14° | 10
4 | AP-L1-V6-15-0  | 0.27 | 8.7, 11.5, 13.1              | −6.6  | 0.8  | 0.91 | 3°  | 8
4 | AP-L1-V10-22-0 | 0.29 | –                            | −5.8  | 0.6  | 0.99 | 11° | 6
4 | AP-L1-V6-6-0   | 0.30 | 10.3‡, 12.1, 13.1‡           | −7.1  | 5.7  | 0.88 | 6°  | 26
4 | AP-L1-V10-16-0 | 0.33 | –                            | −2.0  | −0.1 | 0.93 | 10° | 46
4 | AP-L1-V10-20-0 | 0.34 | 11.2, 11.2, 11.2, 11.5, 12.1 | −5.9  | 0.9  | 0.92 | 38° | 88
4 | AP-L1-V10-5-0  | 0.37 | 11.5‡                        | −9.8  | 1.9  | 0.94 | 84° | 33
4 | AP-L1-V1-6-0   | 0.40 | 13.4‡                        | −7.9  | −0.2 | 0.90 | 19° | 40
4 | AP-L1-V10-17-0 | 0.42 | 9.5                          | −7.4  | 2.5  | 0.93 | 47° | 31
4 | AP-L1-V6-11-0  | 0.48 | 13.4                         | −4.7  | 1.1  | 0.91 | 22° | 26
4 | AP-L1-V10-13-0 | 0.69 | 10.0                         | −4.6  | 4.0  | 0.89 | 93° | 26
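For reference, the following Python sketch shows one plausible way the bulk-flow and warp-angle diagnostics in columns (5), (6) and (8) could be estimated from the atomic gas particle data. It assumes positions (in kpc) are measured relative to the galaxy centre and velocities (in kpc Gyr−1) relative to its bulk motion; the choice of inner/outer splitting radius r_split is a free parameter of the sketch, not a definition taken from the text.

import numpy as np

def bulk_radial_flow(pos, vel, mass):
    """Gas-mass-weighted mean cylindrical radial velocity (kpc/Gyr);
    negative values indicate a net inflow."""
    R = np.hypot(pos[:, 0], pos[:, 1])
    v_R = (pos[:, 0] * vel[:, 0] + pos[:, 1] * vel[:, 1]) / R
    return np.average(v_R, weights=mass)

def bulk_vertical_flow(pos, vel, mass):
    """Gas-mass-weighted mean of sgn(z) * v_z (kpc/Gyr); positive values
    indicate vertical expansion of the gas layer."""
    return np.average(np.sign(pos[:, 2]) * vel[:, 2], weights=mass)

def warp_angle(pos, vel, mass, r_split):
    """Angle (deg) between the angular momentum vectors of the gas inside
    and outside a splitting radius r_split (an assumption of this sketch)."""
    L = np.cross(pos, vel) * mass[:, None]
    R = np.hypot(pos[:, 0], pos[:, 1])
    L_in, L_out = L[R < r_split].sum(axis=0), L[R >= r_split].sum(axis=0)
    cosang = np.dot(L_in, L_out) / (np.linalg.norm(L_in) * np.linalg.norm(L_out))
    return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))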
The Q values of the galaxies are tabulated in Table 1, with rows ordered by increasing Q. The same table also provides a concise summary of various effects that can (and often do) perturb the rotation curves of the galaxies. We discuss each in turn in Sec. 3.2, but first give a brief qualitative overview.
The overall impression that emerges immediately on visual inspection of the videos of the galaxies in our sample is one of rich variety, both in galaxy properties and in the perturbations that they are undergoing. Although the sample was selected with a simple criterion, 60 < vmax / km s−1 < 120, it contains large galaxies with gas discs extending nearly 30 kpc in radius, but also tiny galaxies which barely resemble discs (radii as small as 2 kpc). Some galaxies have obvious, strong gas outflows, while others are rapidly accreting new gas. There are several instances of galaxies losing the majority of their gas and then accreting a new disc that is highly inclined relative to the previous disc, resulting in a strongly warped disc. Some galaxies are very elongated and/or have frequent lopsided (harmonic of order m = 1) perturbations, while others have a long-term stable, circular disc. Mergers and interactions with companions are common, with a range of impact parameters. In many cases the gas merges quickly, while the secondary DM halo completes several orbits before fully merging, visibly disturbing the gas kinematics at each pericentric passage. A few complicated triple mergers are also present in the sample. Other common disruptions include non-merging interactions with gas-rich or gas-less haloes (for our selection in vmax, we do not find any star-less or 'dark' galaxies that significantly perturb the gas kinematics). In some cases a wind from motion through the intergalactic medium (IGM) seems to cause strong m = 1 deformations of the disc.
[Figure 3 consists of one panel per galaxy, each labelled with the galaxy ID and its Q value (e.g. AP-L1-V11-3-0, Q = 0.04), with Radius [kpc] on the horizontal axis and Velocity [km s−1] on the vertical axis.]

Figure 3. Rotation curves for all galaxies for the last three simulation snapshots (13.10, 13.43 and 13.76 Gyr, shown in increasing opacity), coloured by class – class 1 (green, Q < 0.125); class 2 (olive, 0.125 ≤ Q < 0.175); class 3 (orange, 0.175 ≤ Q < 0.225); class 4 (red, Q ≥ 0.225).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 𝑄 is a measure of how well the rotation curve traces the circular velocity curve over time (see Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The purple curves show the 𝑧 = 0 circular velocity curve of each galaxy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' galactic medium (IGM) seems to cause strong 𝑚 = 1 deformations of the disc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='2 Mechanisms perturbing the rotation curve 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1 Mergers and close companions Close interactions and mergers with gas-rich companions cause the most obvious disturbances to rotation curves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Gas-less (but not nec- essarily dark) companions cause less disruption, but can still visibly disturb the gas kinematics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In most cases the effect of a gas-less com- panion on the rotation curve is minimal, and even in the most extreme cases the rotation curve is usually still a reasonably good tracer of the circular velocity curve, if no other perturbation is ongoing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Sim- ilar statements apply to the gas-less remnant of an initially gas-rich companion as it returns on subsequent orbital passages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We list two examples of interactions with companions (and all other types of perturbations discussed in subsections below) and where they can be most clearly seen in our collection of visualisations in Table 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Using the merger trees (Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1), we identify all companion galax- ies that merged into each galaxy in our sample and their progenitors.' 
In addition, we track the progenitors and descendants of all companion galaxies, defined as those found in the same FoF group as the galaxy at any time (but that did not later merge). For each companion and merged object, we find its maximum DM mass at any time and compare it to the maximum DM mass of the galaxy of interest, discarding any with a mass ratio less than 1:20. We found that interactions with smaller mass ratios caused little visible disturbance to the gas discs, and no noticeable perturbation to their rotation curves.

Table 2.
Perturbation type | Visualisation file | Time (Gyr) | Description and comments
Merger/companion (§3.2.1) | AP-L1-V11-3-0-gas-edge-and-face.mp4 | 10.4 – 12.0 | Merger with the gas disc of a companion. The companion arrives on a prograde orbit nearly in the plane of the disc around time 10.2 (this is actually the second passage, the first was around 8.4 Gyr, at this time the gas discs interacted but did not collide); the disc survives and settles (by time ∼ 12.7 Gyr).
Merger/companion (§3.2.1) | AP-L1-V6-15-0-gas-edge-and-face.mp4 | 11.2 – 12.0 | Merger with the gas disc of a companion. The companion arrives on an oblique prograde orbit; the disc is almost completely destroyed and does not re-form until time ∼ 13.3 Gyr.
Radial inflows (§3.2.2) | AP-L1-V4-14-0-gas-edge-and-face.mp4 | 13.0 – 13.5 | Gas is visibly ejected from the disc, likely by a series of supernova explosions, around time 12.5 Gyr. This gas quickly begins to settle back onto the disc. While this is ongoing, the entire disc contracts radially.
Vertical outflows (§3.2.2) | AP-L1-V4-13-0-gas-edge-and-face.mp4 | 13.0 – 13.5 | Several prominent wisps of ejected gas are visible both above and below the disc (in the right panel of the video), launched over a period of a few hundred megayears.
Elongated halo (§3.2.3) | AP-L1-V11-6-0-face-gas-and-dm.mp4 | all | The DM halo is visibly elongated throughout, driving transient lopsided (𝑚 = 1 harmonic) and bisymmetric (𝑚 = 2) deformations of the gas disc. For example, at time 13.0 Gyr, the disc is both elongated and lopsided. The position angle of the elongation of the gas disc is visibly correlated with the position angle of the DM halo throughout.
Warped disc (§3.2.4) | AP-L1-V4-6-0-gas-edge-and-face.mp4 | 13.0 – 13.5 | By 11 Gyr the gas disc is very small after being consumed by star formation and losing gas to supernova feedback. Between 12 and 13 Gyr a large amount of gas accretes onto the disc, misaligned with the existing disc. By 13.5 Gyr the edge-on planes of the inner and outer discs are clearly visible in the right panel of the visualisation.
IGM wind (§3.2.5) | AP-L1-V1-4-0-gas-edge-and-face.mp4 | all | Throughout the visualisation the diffuse gas surrounding the disc has a noticeable net flow from right to left in the image, in both the face-on and edge-on views.

We define the time of the first pericentric passage of an interaction as the time of the earliest simulation snapshot when both galaxies are found in the same FoF group and the sign of the radial velocity difference between the companion and host is positive. In Table 1, we list the times of first pericentric passages for all such interactions, excluding those before 8 Gyr. There are 25 interactions in total, occurring in 15 galaxies. All of the tabulated companions/mergers are initially gas rich – their peak (over time) gas-to-stellar mass ratios are ≥ 1.7. We also note that all companions and mergers with mass ratios greater than 1:20 had stars – perturbations due to ‘dark’ galaxies are unimportant for the galaxies in our sample. The collision of two gas discs in a 1:20 or greater merger invariably strongly and globally disturbs the gas morphology and kinematics, making any other possible perturbations moot. We therefore flag ongoing interactions (which may persist long after the first pericentric passage), defined as those where the closest approach of the companion occurs during the last 3 simulation snapshots (∼ 650 Myr) and is closer than 25 kpc. These are marked ‡ in Table 1 and shown with open symbols in later figures. It is clear from Table 1 that galaxies with an ongoing interaction with a gas-rich companion have rotation curves that are poor tracers of the circular velocity (classes 3 & 4, with one exception in class 2 where the mass ratio was close to 1:20 to begin with and the companion has been heavily stripped by the end of the simulation).
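In practice, the selection and flagging described above reduce to a few array operations per companion-host pair. The sketch below is a minimal illustration, assuming hypothetical per-snapshot arrays (snapshot times, a shared-FoF-group flag, the companion-host radial velocity difference, and the 3D separation) extracted from the merger trees and halo catalogues; none of these names come from the APOSTLE tooling itself.

```python
import numpy as np

def select_companions(max_dm_mass_host, max_dm_mass_companions, min_ratio=0.05):
    """Keep companions whose peak DM mass is at least 1:20 of the host's peak DM mass."""
    ratios = np.asarray(max_dm_mass_companions) / max_dm_mass_host
    return ratios >= min_ratio

def first_pericentric_passage(t_snap, same_fof, v_rad_rel, t_min=8.0):
    """
    Earliest snapshot time (> t_min, in Gyr) at which the pair share a FoF group and the
    companion-host radial velocity difference is positive (receding, i.e. just past pericentre).
    """
    t_snap = np.asarray(t_snap)
    ok = (t_snap > t_min) & np.asarray(same_fof) & (np.asarray(v_rad_rel) > 0)
    return t_snap[ok][0] if ok.any() else None

def ongoing_interaction(separation_kpc, n_last=3, r_max=25.0):
    """
    Flag an interaction as ongoing if the closest approach falls within the last n_last
    snapshots (~650 Myr here) and is closer than r_max kpc.
    """
    sep = np.asarray(separation_kpc)
    i_min = int(np.argmin(sep))
    return (i_min >= len(sep) - n_last) and (sep[i_min] < r_max)
```

With helpers of this kind, a companion would be kept only if it passes the 1:20 mass-ratio cut, dated by its first pericentric passage, and given the ‡ flag when the ongoing-interaction criterion is met.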
However, galaxies may recover quickly (in a little more than a dynamical time) from earlier interactions, depending on the mass ratio, impact parameter, and the relative inclinations of the gas discs. For example, galaxy AP-L1-V11-3-0 (class 1) finished merging with a massive (𝑀tot ∼ 3.3 × 10¹⁰ M⊙, dark matter mass ratio ∼ 0.13) companion around 12.5 Gyr (see entry in Table 2 for details), but since the approach was nearly in the plane of the gas disc, the disruption of the disc was limited and the gas settled after the merger ended, such that the rotation curves trace the circular velocity very well by 12.7 Gyr and thereafter. On the other hand, galaxy AP-L1-V6-15-0 (class 4) experienced an oblique collision with a galaxy (𝑀tot ∼ 4.3 × 10⁹ M⊙, dark matter mass ratio ∼ 0.15), dispersing nearly all of the primary galaxy's gas on its first approach (∼ 11.5 Gyr), before the now partially stripped, but still gas-rich companion halo returns and the rest of the gas merges (∼ 12.2 Gyr). This dramatic event severely disrupts both the gas disc and DM halo, and the gas dynamics are entirely out of equilibrium until ∼ 13.0 Gyr, when they begin to settle. By 𝑧 = 0 the rotation curve still underestimates the circular velocity overall and has radially localised features (e.g. ‘wiggles’).
[Figure 4: two panels plotting, against 𝑄, the peak bulk radial inflow rate [kpc Gyr−1] (upper) and the peak bulk vertical outflow rate [kpc Gyr−1] (lower); markers distinguish weak bulk flows, strong bulk flows, and galaxies with an ongoing interaction with a companion.]

Figure 4. Correlations of bulk gas flows with the degree to which the rotation curve traces the circular velocity curve, 𝑄. Upper panel: the average radial velocity (in cylindrical coordinates) of ‘atomic’ gas particles in each galaxy in our sample is calculated at each of the last 3 snapshots (13.10, 13.43 and 13.76 Gyr), and the minimum value (i.e. peak inflow rate) is plotted on the vertical axis. Lower panel: the average vertical velocity away from the disc midplane (i.e. sgn(𝑧)𝑣𝑧) of ‘atomic’ gas particles in each galaxy in our sample is calculated at each of the last 3 snapshots, and the maximum value (i.e. peak outflow rate) is plotted on the vertical axis. Galaxies currently strongly interacting with a companion (marked ‡ in Table 1) are plotted with open symbols. Galaxies with stronger bulk flows have preferentially higher 𝑄 values. Galaxies with peak radial inflow rates stronger (more negative) than −5 kpc Gyr−1 and/or peak vertical outflow rates greater than 1 kpc Gyr−1 (red dashed lines; galaxies with flows stronger than either or both limits are shown with red markers) are not found in class 1 (green background), and no galaxies without strong bulk flows are found in class 4 (red background).

3.2.2 Bulk non-circular gas flows

Bulk non-circular gas flows (e.g. radial or vertical flows) directly violate the assumption of rotational support implied by the expectation that the rotation curve of a galaxy should agree with its circular velocity curve. Bulk outflows in low-mass galaxies in APOSTLE are driven predominantly by the injection of thermal energy by supernovae and are preferentially ejected along the ‘path of least resistance’: vertically from the disc (see Table 2 for an example).
Bulk inflows within the disc, on the other hand, tend to be radial and are usually associated with gas accretion (see example in Table 2). We quantify bulk non-circular gas flows as follows. We focus on the atomic gas disc by first selecting only ‘atomic’ gas particles, which we recall that we define as those with H i mass fractions of > 0.5, and then selecting only those particles within a cylindrical aperture with a radius equal to the radius enclosing 90 per cent of the H i mass of the galaxy, and a half-height equal to the half-height enclosing 90 per cent of the H i mass. We calculate the radial (in cylindrical coordinates) and vertical bulk flow rates of the selected particles as their mass-weighted average radial and vertical velocities. For the vertical flow rate, we use the speed towards or away from the disc midplane (i.e. sgn(𝑧)𝑣𝑧). Calculating these flow rates for a few consecutive simulation snapshots revealed that they are highly time-variable, motivating us to choose a summary statistic. The peak (most negative) radial inflow rates and peak (most positive) vertical outflow rates from the last 3 snapshots (13.10, 13.43 and 13.76 Gyr) are plotted in Fig. 4 against the 𝑄 parameter defined in equation (1). To emphasize that these flow rates capture a global contraction/expansion of the disc rather than, e.g., the speed of gas selected to be ‘outflowing’ or ‘inflowing’, we show values in units of kpc Gyr−1 (rather than, e.g., km s−1).
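As a concrete illustration of this measurement, the following sketch computes the two bulk flow rates for a single snapshot. It assumes hypothetical particle arrays (masses, cylindrical radii R, heights z, velocity components v_R and v_z in the disc frame, and H i mass fractions) and takes the 90-per-cent H i radius and half-height as precomputed inputs; it is a minimal implementation of the procedure described above, not the authors' code.

```python
import numpy as np

def bulk_flow_rates(m, R, z, v_R, v_z, f_HI, r90, z90):
    """
    Mass-weighted mean radial and vertical bulk flow rates of the atomic gas disc:
    particles with H i mass fraction > 0.5 inside a cylinder of radius r90 and
    half-height z90 (the radius and half-height enclosing 90 per cent of the H i mass).
    Coordinates and velocities are assumed to be in the disc frame, in kpc and kpc/Gyr.
    """
    sel = (f_HI > 0.5) & (R <= r90) & (np.abs(z) <= z90)
    w = m[sel]
    radial_rate = np.average(v_R[sel], weights=w)                       # negative = net inflow
    vertical_rate = np.average(np.sign(z[sel]) * v_z[sel], weights=w)   # positive = net outflow
    return radial_rate, vertical_rate

# The summary statistics used in the text are then, over the last three snapshots,
# the most negative radial_rate (peak inflow) and the most positive vertical_rate (peak outflow).
```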
We also note that ‘harmonic’ non-circular motions, such as a bar-like distortion of the gas orbits, are not captured in this measurement because such distortions do not result in a net transport of gas.

Fig. 4 shows that galaxies with stronger inflows and/or outflows tend to have rotation curves that are poorer tracers of their circular velocity curves. We illustrate this by plotting galaxies with radial inflow rates stronger (more negative) than −5 kpc Gyr−1 and/or vertical outflow rates greater than 1 kpc Gyr−1 (approximately the median flow rates for galaxies in our sample) with red markers. Entries in Table 1 exceeding these thresholds are also highlighted in bold face. By this measure, most galaxies in our sample (26/33) have strong bulk flows in at least one of these two directions, but no galaxies in our class 1 (𝑄 < 0.125) do. There is furthermore a clear correlation between each of the two peak flow rates and 𝑄, albeit with large scatter. The connection between bulk flows, in the vertical direction in particular, was one of the first that we noticed in our initial visual analysis of our collection of videos: a lack of visible outflows from a galaxy is a strong predictor that its rotation curve will be a good tracer of its circular velocity curve. However, given the diversity of perturbations which can cause rotation curves to differ from the circular velocity curve, having weak bulk flows does not guarantee this to be the case, as is evident from exceptions such as AP-L1-V10-16-0. This galaxy has amongst the weakest bulk flows in our sample, but falls in class 4 (𝑄 ≥ 0.225).

3.2.3 Dark matter halo shape

Elongated or triaxial DM haloes give rise to non-circular gas orbits, with gas often visibly sloshing around in the aspherical potential (see Table 2 for an example). In galaxies where this mechanism is effective, the rotation curves are highly variable as strong, transient lopsided (harmonic of order 𝑚 = 1) and bisymmetric (𝑚 = 2) modes are excited in the gas disc. Fig. 5 shows the anti-correlation between the intermediate-to-major axis ratio of the DM halo and the 𝑄 parameter defined in equation (1). We focus on the shape of the halo in the region occupied by the disc by calculating axis ratios using DM particles in a spherical aperture with a radius equal to twice the radius enclosing 90 per cent of the galaxy's H i mass. The squares of the axis lengths are proportional to the eigenvalues of the reduced inertia tensor:

$$ I_{ij} = \frac{\sum_n m_n \, r_{n,i} \, r_{n,j} / r_n^2}{\sum_n m_n} , \qquad (5) $$

where 𝑟𝑛 and 𝑚𝑛 are the coordinate vector and mass of particle 𝑛, respectively. Even very small departures from 𝑏/𝑎 = 1 seem to be sufficient to drive large changes in the rotation curves. The red dashed line in Fig. 5 marks 𝑏/𝑎 = 0.95 – no galaxies with 𝑏/𝑎 < 0.95 fall in our class 1 (𝑄 < 0.125), and all save one class 4 (𝑄 ≥ 0.225) galaxies have 𝑏/𝑎 < 0.95.
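A minimal sketch of this axis-ratio measurement is given below, assuming a hypothetical array of DM particle masses and galaxy-centred positions. It builds the reduced inertia tensor of equation (5) in a single pass and reads the axis lengths off its eigenvalues; the triaxiality parameter computed at the end anticipates the definition given later in this subsection.

```python
import numpy as np

def halo_axis_ratios(m, pos, r_aperture):
    """
    Axis ratios of the DM halo from the reduced inertia tensor of equation (5), using
    particles inside a spherical aperture (here: twice the radius enclosing 90 per cent
    of the galaxy's H i mass). `pos` is an (N, 3) array centred on the galaxy.
    """
    r2 = np.sum(pos**2, axis=1)
    sel = (r2 > 0) & (r2 <= r_aperture**2)
    p, w = pos[sel], m[sel]
    # I_ij = sum_n m_n r_{n,i} r_{n,j} / r_n^2 / sum_n m_n
    I = np.einsum('n,ni,nj->ij', w / r2[sel], p, p) / w.sum()
    eigvals = np.sort(np.linalg.eigvalsh(I))[::-1]     # proportional to a^2 >= b^2 >= c^2
    a, b, c = np.sqrt(eigvals)
    T = (a**2 - b**2) / (a**2 - c**2)                  # triaxiality parameter (defined below)
    return b / a, c / a, T
```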
[Figure 5: DM halo axis ratio 𝑏/𝑎 (0.850–1.000) plotted against 𝑄; markers distinguish galaxies not currently interacting from those with an ongoing interaction with a companion.]

Figure 5. Anti-correlation between DM halo intermediate-to-major axis ratio 𝑏/𝑎 (measured from the reduced inertia tensor of DM particles within a spherical aperture with radius equal to twice the radius enclosing 90 per cent of the galaxy's H i mass) and the degree to which the rotation curve traces the circular velocity curve, 𝑄. An aspherical halo (𝑏/𝑎 ≲ 0.95, marked by the dashed red line and in bold in Table 1) is a strong predictor of poor agreement between the rotation curve and the circular velocity curve, but a spherical halo does not guarantee close agreement. The coloured background marks the same intervals in 𝑄 as introduced in Fig. 3.

We highlight the entries for galaxies with 𝑏/𝑎 < 0.95 in Table 1 – these make up 17 of the 33 galaxies in our sample. The anti-correlation in the figure has considerable scatter, reflecting the fact that a galaxy with a spherical halo can be perturbed by some other mechanism, but an aspherical halo seems to be a strong predictor of the rotation curve being a poor tracer of the circular velocity curve. Although not shown in Fig. 5, we also investigated trends in 𝑄 as a function of the minor-to-major axis ratio (𝑐/𝑎) and the triaxiality parameter ($T \equiv (a^2 - b^2)/(a^2 - c^2)$). These show somewhat weaker trends than that with 𝑏/𝑎, suggesting that a prolate or triaxial halo shape (i.e. 𝑏/𝑎 ≠ 1) has a stronger perturbative effect than an oblate shape (𝑏/𝑎 ∼ 1). This agrees with intuition: a light, rotationally supported disc has possible stable configurations in the potential of an oblate halo, but is unstable in a prolate or triaxial potential. The mass of the gas disc also plays a role. A more massive disc may resist the perturbative effect of an aspherical halo, or even ‘sphericalise’ the halo if it is massive enough. We will return to the importance of the gas disc mass in Sec. 3.3 below.
3.2.4 Warped discs

Several galaxies in our sample have visible warps in their gas discs; one example of a prominent warp is listed in Table 2. We quantify the strength of a warp by the angle 𝜃warp between the angular momentum vectors of the inner and outer gas discs, which we define as the inner 30 per cent and outer 60–90 per cent of the H i gas by mass. We plot 𝜃warp against our 𝑄 parameter defined in equation (1) in Fig. 6. Most galaxies in our sample have warp angles of less than ∼ 20° and these span the entire range in 𝑄, but a minority have large warp angles caused by rapid accretion of gas with angular momentum strongly misaligned with the existing disc, or an interaction with a companion – these galaxies have preferentially higher 𝑄 values and fall in our classes 3 & 4 (𝑄 ≳ 0.175). We flag galaxies with 𝜃warp > 30° (dashed line in the figure) as strongly warped, and highlight their entries in Table 1 in bold face. There are 8 such galaxies in our sample.

[Figure 6: warp angle 𝜃warp [deg] (0–80) plotted against 𝑄; markers distinguish galaxies not currently interacting from those with an ongoing interaction with a companion.]

Figure 6. Correlation between warp angle 𝜃warp (defined as the angle between the angular momentum vectors of the inner 30 per cent and the outer 60–90 per cent of the H i gas, by mass) and the degree to which the rotation curve traces the circular velocity curve, 𝑄. A strong warp (≳ 30°, marked by the dashed red line and in bold in Table 1) is associated with a poor agreement between the rotation and circular velocity curves (𝑄 ≳ 0.2). The coloured background marks the same intervals in 𝑄 as introduced in Fig. 3.
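The warp angle defined above can be computed directly from the H i particle data. The sketch below assumes hypothetical arrays of H i masses, positions and velocities centred on the galaxy, and interprets the ‘inner 30 per cent’ and ‘outer 60–90 per cent’ as cumulative H i mass fractions ranked by radius; that ranking choice is an assumption, not something stated explicitly in the text.

```python
import numpy as np

def warp_angle(m_HI, pos, vel):
    """
    Angle (in degrees) between the angular momentum vectors of the inner 30 per cent
    and the outer 60-90 per cent of the H i gas, ranked by radius and weighted by H i mass.
    `pos` and `vel` are (N, 3) arrays centred on the galaxy.
    """
    r = np.linalg.norm(pos, axis=1)
    order = np.argsort(r)
    frac = np.cumsum(m_HI[order]) / np.sum(m_HI)              # cumulative H i mass fraction
    L = np.cross(pos[order], vel[order]) * m_HI[order, None]  # per-particle angular momentum

    def unit_L(mask):
        Ltot = L[mask].sum(axis=0)
        return Ltot / np.linalg.norm(Ltot)

    L_in = unit_L(frac <= 0.3)
    L_out = unit_L((frac > 0.6) & (frac <= 0.9))
    cosang = np.clip(np.dot(L_in, L_out), -1.0, 1.0)
    return np.degrees(np.arccos(cosang))                      # theta_warp
```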
We note that the influence of a warp on the rotation curve is somewhat exaggerated in our analysis because we have measured the rotation curves as the median azimuthal velocity of particles in a fixed plane aligned with the inner disc. The rotation speed in a warped outer disc is therefore underestimated by about a factor of cos 𝜃warp, which will be reflected in the measured 𝑄 value.

3.2.5 IGM wind

In the visualisations of some of our galaxies, a ‘wind’ blowing against the gas disc due to its motion through the IGM is clearly visible, and appears to deform the disc, often resulting in a lopsided disc displaced ‘downwind’. One example where this effect is especially clear is given in Table 2. We note, however, that the galaxy in question is in class 1 (𝑄 = 0.09) and has an IGM wind speed at 𝑧 = 0 (see below for details) close to the median in our sample of galaxies. This highlights both the difficulty in quantifying the strength of the wind and its potentially quantitatively subtle effect on the kinematics of the gas disc, despite the perturbative effect of the wind being visually very clear. As a consequence, our efforts to quantify such perturbations have yielded less clear-cut results than for the other types of perturbations discussed above, suggesting that perturbation due to a wind may be more nuanced. The effect on the rotation curve likewise often seems to be fairly subtle.
We estimate the speed of the IGM wind as follows. We first select gas particles in a spherical shell between one and two times the virial radius⁴ around the galaxy. In order to avoid undue bias by other nearby galaxies, we further restrict our selection to include only those particles not gravitationally bound to any subhalo according to the halo finder. We take the median velocity in the rest frame of the galaxy (the same frame used when measuring the rotation curves) of the remaining selected particles to be the IGM wind velocity.

⁴ We define the virial radius as the radius of a sphere within which the mean matter density is 200 times the critical density $\rho_{\rm crit} = 3H_0^2/(8\pi G)$.
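A minimal sketch of this estimate is given below, assuming hypothetical arrays of gas particle positions and velocities, the galaxy's position, velocity and virial radius, and a boolean flag for membership of any subhalo. Taking the component-wise median of the relative velocities before forming the speed is an interpretation of ‘median velocity’; the authors' exact convention may differ.

```python
import numpy as np

def igm_wind_speed(gas_pos, gas_vel, gal_pos, gal_vel, r_vir, bound_to_subhalo):
    """
    IGM wind speed: magnitude of the median velocity (in the galaxy rest frame) of gas
    particles in a spherical shell between 1 and 2 virial radii that are not bound to
    any subhalo. Positions and velocities are (N, 3) or (3,) arrays in consistent units.
    """
    r = np.linalg.norm(gas_pos - gal_pos, axis=1)
    sel = (r > r_vir) & (r < 2.0 * r_vir) & (~bound_to_subhalo)
    v_rel = gas_vel[sel] - gal_vel            # shift to the galaxy rest frame
    v_wind = np.median(v_rel, axis=0)         # component-wise median velocity
    return np.linalg.norm(v_wind)             # wind speed v_wind
```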
[Figure 7: IGM wind speed 𝑣wind [km s−1] (0–80) plotted against 𝑄; markers distinguish galaxies not currently interacting from those with an ongoing interaction with a companion.]

Figure 7. IGM wind speed 𝑣wind, calculated as the median velocity of the gas particles between 1 and 2 times 𝑟200 of the galaxy and not belonging to any FoF group, in a frame of reference where the gas disc is at rest (see Sec. 3.2.5 for details), plotted against the degree to which the rotation curve traces the circular velocity curve, 𝑄 (Sec. 3.1). No strong trend is visible. The dashed red line marks 50 km s−1; we consider galaxies above this line to be the strongest outliers in 𝑣wind, and mark the corresponding values in bold in Table 1. The coloured background marks the same intervals in 𝑄 as introduced in Fig. 3.

We have verified that the conclusions that we reach are not very sensitive to the precise radial range used (within a factor of about 3), or whether or not bound particles are included. The speed of the wind 𝑣wind is plotted against the 𝑄 parameter defined in equation (1) in Fig. 7. Any correlation is less clear than those seen in Figs. 4–6 above, but there is tentative evidence that the galaxies with the highest IGM wind speeds in our sample have higher 𝑄 values, or at least avoid the lowest 𝑄 values. We draw a boundary at 50 km s−1 (dashed line in the figure) separating the galaxies with the highest wind speeds (5 of the 33 in our sample) from the others, and highlight the entries corresponding to the galaxies above this threshold in Table 1. In addition to the caveats listed above, we are cautious in our interpretation of perturbations due to the IGM wind because we struggle to find a clear correspondence between the galaxies where we identified what appeared to be a wind in our visualisations and those with a high wind speed (or other similar quantitative measures that we explored).
Furthermore, our impression from our visual inspection is that periods of strong IGM wind are often short-lived (the example in Table 2 is an exception to this), and any perturbation of the rotation curve does not seem to persist after the wind subsides.

3.2.6 Summary

Taking all of these various kinds of perturbations into account, it is perhaps unsurprising that so few galaxies in our sample have a rotation curve that closely traces their circular velocity curve at z = 0. However, looking at Table 1, there are also a few galaxies that have avoided any obvious recent disturbance and yet have rotation curves that are poor tracers of their circular velocity curves. The galaxy AP-L1-V10-30 presents an intriguing case. The visualisations (e.g. AP-L1-V10-30-gas-edge-and-face.mp4) do not reveal any obvious perturbations in any of the categories discussed above at late times, except perhaps some vertical outflows from the disc. Inspecting its entry in Table 1, none of its properties exceed our (admittedly somewhat arbitrary) thresholds for strong perturbations. And yet, its late-time rotation curve (Fig. 3) significantly underestimates the circular velocity curve in the central ∼ 2 kpc, and is time-variable in the outer regions of the disc. The presence of such an example in our sample of galaxies emphasizes that we have only scratched the surface of a complex topic: it is clear that many types of perturbations significantly influence the gas kinematics in low-mass galaxies, but a more complete understanding of the prevalence and importance of each type will require further study, often on a galaxy-by-galaxy basis.
Ultimately, given the intrinsically limited information available from observations of real galaxies, a practical question to ask is: are galaxies where the rotation curve is a good tracer of the circular velocity curve separated from others in terms of observable properties? In our exploration of our sample of galaxies, it became clear very quickly that gas mass (e.g. at fixed stellar mass) plays an important role. Once galaxies with ongoing interactions or mergers are removed from consideration, galaxies with higher gas mass are more likely to have rotation curves that trace the circular velocity well, and vice versa (see further discussion in Sec. 3.3 below). This is tentatively consistent with the stabilising effects of a massive disc against some perturbations, including the influence of an aspherical DM halo (the type of perturbation with the clearest effect out of those that we investigated, save mergers), as mentioned above.

3.3 Trends with galaxy scaling relations

We plot each pair-wise relation between gas mass, stellar mass, and v_max at z = 0 in Fig. 8 (tabulated values are available in Oman et al. 2019, table A1). We also plot the data from the SPARC compilation (Lelli et al. 2016a) with small grey points⁵. The simulated galaxies broadly follow observed trends in these relations; we do not discuss the comparison further (see Oman et al. 2019, for a detailed comparison).
Points for simulated galaxies are coloured by their class, from green (class 1) to red (class 4). We plot galaxies with a recent merger or interaction with a companion (see Sec. 3.2.1) with an open symbol. Considering the lower panels, we highlight that all non-interacting galaxies with gas mass greater than 1.5 × 10⁹ M⊙ (9 galaxies) are in class 1 or 2, while only 2/17 non-interacting galaxies with lower gas masses are. We note that galaxies with recent/ongoing interactions or mergers have preferentially higher gas masses (at fixed v_max) than non-interacting galaxies – this is unsurprising, since all companion galaxies of galaxies in our sample bring a lot of gas with them (see Sec. 3.2.1). There is no similarly clear separation between points of different colours in maximum circular velocity or stellar mass, besides some weak trends coming from the fact that gas mass correlates with both of these parameters. It is tempting to attribute gas mass being more important than stellar mass in this context to the galaxies in our sample having gas masses exceeding their stellar masses, such that the gravitational influence of the stars on the gas kinematics is not dominant. However, the gas is typically more extended, so the stars can still dominate the gravitational potential near the centre, and can exert a strong non-gravitational influence through supernova feedback. The strength of supernova feedback would be expected to correlate instead with recent star formation, but we did not find any strong trend with recent star formation rate (not shown).

⁵ The SPARC compilation provides a 'quality flag' from 1 (best) to 3 (not suitable for mass modelling). Since in this work we are interested in galaxies spanning the full range in rotation curve quality, we include all SPARC galaxies in all figures where they are shown.

[Figure 8: pair-wise panels of M⋆ [M⊙], M_gas = 1.33 M_HI [M⊙] and v_max [km s⁻¹]; axis ticks and legend omitted here.]

Figure 8. Pair-wise relations between gas mass, stellar mass, and v_max, with APOSTLE galaxies plotted as larger points and coloured by class (class 1 with Q < 0.125 as green, class 2 with 0.125 ≤ Q < 0.175 as olive, class 3 with 0.175 ≤ Q < 0.225 as orange, class 4 with Q ≥ 0.225 as red) and open points showing galaxies with a recent gas-rich merger. SPARC data from Lelli et al. (2016a) are plotted with small grey points. The areas outside of our selection 60 < v_max/km s⁻¹ < 120 are shaded in the right panels.
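For reference, the class boundaries quoted in the caption above can be written as a simple binning of Q; this is only an illustrative sketch of the thresholds, not code from the paper.

```python
def q_class(Q):
    """Rotation-curve quality class used in Fig. 8, from the Q thresholds in the caption."""
    if Q < 0.125:
        return 1
    elif Q < 0.175:
        return 2
    elif Q < 0.225:
        return 3
    else:
        return 4
```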
Considering the lower-right panel of Fig. 8, we were surprised not to find a stronger dependence on a combination of v_max and M_gas, as might be expected if the gas-to-total mass ratio was a primary driver of Q. We note, however, that a larger sample of simulated galaxies would be very helpful in exploring these issues further.

We next turn our attention to the possible biases introduced into observational scaling relations involving rotation curve measurements by the types of perturbations discussed above. We emphasize that we investigate here only the 'direct' impact due to the difference between the rotation curve and the circular velocity curve – the rotation curves that we consider are those that an observer with perfect knowledge of the gas kinematics would measure: the median azimuthal velocity as a function of radius. With real observations, this direct impact is likely to be compounded by additional errors induced by e.g. attempting to model a non-equilibrium system assuming equilibrium dynamics, assuming circular orbits when the actual orbits are non-circular, etc. (see e.g. Read et al. 2016; Oman et al. 2019; Sellwood et al. 2021; Roper et al. 2022).
We consider the BTFR (McGaugh et al. 2000), and the v_fid − v_max relation (Santos-Santos et al. 2020) quantifying the shapes of rotation curves, as illustrative examples.

3.3.1 The BTFR

The upper panel of Fig. 9 shows the BTFR of the galaxies in our sample, along with galaxies from the SPARC compilation (Lelli et al. 2016a) and the BTFR of SPARC galaxies reported by Lelli et al. (2016b) to provide context. We do not undertake a comparison with the observed BTFR in this work, as this has previously been addressed by Oman et al. (2016) and Sales et al. (2017). In this panel, the horizontal axis shows v_max, the maximum of the circular velocity curve. This can be thought of as the 'truth' that is obtained in the ideal case where the gas rotation curve follows the circular velocity curve and the measurement of the rotation curve is without error. We also plot an indicative linear fit to the points in this panel as a black solid line, excluding interacting/merging galaxies (open symbols) from the calculation. The fit minimizes the sum of the squared offsets in M_bar from the BTFR. The best-fitting slope (M_bar ∝ v_max^α) is α = 3.9.

In the centre panel of Fig. 9, the coloured points show a measurement of the maximum rotation velocity of the gas, determined from the flat portion of the rotation curve following the approach of Roper et al. (2022, Appendix C; if the rotation curve is still rising at the outermost point, the value at this point is used) – we label this v_flat. Each point is joined to its position in the upper panel by a solid line. Unsurprisingly, galaxies in our classes 3 & 4 move further (on average) from their positions in the upper panel than those in our classes 1 & 2. Furthermore, nearly all points shift to the left, as the rotation curves preferentially underestimate the circular velocity curves. This is emphasized in the lower panel of the figure, where the ratio v_flat/v_max is plotted against v_max – here it is clear that the underestimates get systematically worse towards lower v_max (or lower M_bar). The v_max and v_flat values for each galaxy in our sample are tabulated in Table 3. The trends evident in the bottom panel of Fig. 9 mean that the BTFR is biased to a higher normalisation (because v_flat systematically underestimates v_max), and shallower slope (because the underestimates get worse at lower v_max).
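To make the fitting step concrete, here is a minimal sketch of a least-squares power-law fit. The paper states only that squared offsets in M_bar from the relation are minimised; working with offsets in log10(M_bar) at fixed v_max is an assumption of this sketch, as are the function and argument names.

```python
import numpy as np

def fit_btfr_slope(v_max_kms, m_bar_msun):
    """Fit log10(M_bar) = alpha * log10(v_max) + c and return (alpha, c).

    A slope alpha of about 3.9 corresponds to M_bar proportional to v_max**3.9.
    """
    x = np.log10(np.asarray(v_max_kms))
    y = np.log10(np.asarray(m_bar_msun))
    alpha, c = np.polyfit(x, y, 1)  # degree-1 polynomial: slope, intercept
    return alpha, c
```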
The change in slope is illustrated in the centre panel by the dashed line, which has a slope of α = 3.1. This shows a linear fit to the filled points in this panel, similar⁶ to the solid line (repeated in the upper and centre panels). In the APOSTLE simulations, the BTFR has a steep cutoff around v_max = 50 km s⁻¹ (see Oman et al. 2016; Sales et al. 2017). Replacing v_max with v_flat seems to soften the cutoff, potentially enough for the trend to become more reminiscent of the constant slope often claimed in observational studies (e.g. McGaugh et al. 2000; Ponomareva et al. 2018; Lelli et al. 2019), although an analysis including galaxies at lower v_max would be needed to confirm this. If observed galaxies are subject to broadly similar perturbations as those that we observe in our simulations, which seems likely, then the observed BTFR is probably biased in a similar sense. The magnitude of the effect, however, depends on the details of each type of perturbation and their relative frequencies, which the simulations may not capture in full detail.
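The dashed-line fit is constrained to pass through the solid-line relation at v_max = 120 km s⁻¹ (see footnote 6 below). One way to realise such a pivot-constrained fit, again working in log space and with names invented for this sketch, is to fit only the slope of a line forced through a fixed point:

```python
import numpy as np

def fit_slope_through_pivot(v_flat_kms, m_bar_msun, y0, x0=np.log10(120.0)):
    """Least-squares slope of y = alpha * (x - x0) + y0 with the pivot (x0, y0) fixed.

    Here x = log10(v_flat) and y = log10(M_bar); y0 would be the value of the
    unconstrained fit evaluated at v_max = 120 km/s.
    """
    x = np.log10(np.asarray(v_flat_kms))
    y = np.log10(np.asarray(m_bar_msun))
    dx = x - x0
    alpha = np.sum(dx * (y - y0)) / np.sum(dx * dx)  # analytic least-squares slope
    return alpha
```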
Interestingly, attempting to remove galaxies where the rotation curve does not trace the circular velocity curve from a sample to be used to measure the BTFR likely still results in a bias, because those galaxies where the rotation curve is a good tracer of the circular velocity curve are not an unbiased sub-sample: they tend to be the most gas-rich galaxies and to have higher M_bar at fixed v_max (see discussion of Fig. 8). There is observational evidence for such a bias: Papastergis et al. (2016) found that using a sample selected to be extremely gas-rich (M_gas/M⋆ ≳ 2.7) yields a steeper slope for the BTFR M_bar ∝ (W/2)^α as a function of H i line width W than studies with less extreme selections (e.g. Zaritsky et al. 2014; Hall et al. 2012; McGaugh 2012). They find α = 3.75 ± 0.11 (rather than α ∼ 3.3–3.4). Ball et al. (2022) similarly find that restricting their sample to gas-rich (M_HI/M⋆ > 2) galaxies significantly increases the slope of the BTFR, from about 3.3 to 3.9. They also find that dividing their galaxy sample into high- and low-baryonic-mass sub-samples (at M_bar = 10¹⁰ M⊙) gives different slopes, of 2.9 and 4.1, respectively. However, Gogate et al. (2022) instead find no strong dependence on gas fraction. All of these studies use spatially-integrated spectral line widths for the velocity axis of the BTFR. Searching for similar trends when spatially resolved rotation curves are used instead is an interesting avenue for future studies.

⁶ We have constrained the fit to intersect that from the upper panel at v_max = 120 km s⁻¹, loosely motivated by the BTFR being best constrained around this maximum circular velocity. Without this constraint, the best-fitting line has a much shallower slope that we attribute to the sparse sampling at higher v_flat.

Table 3. For each galaxy in our sample: maximum circular velocity (v_max); ratio of the flat value of the rotation curve (v_flat, see Sec. 3.3.1) and v_max; amplitude of the circular velocity curve at the 'fiducial radius' (v_fid,circ, see Sec. 3.3.2); ratio of the amplitude of the rotation curve at the 'fiducial radius' (v_fid,rot) and v_fid,circ. Galaxies are in order of increasing Q, as in Table 1.

Class  Galaxy ID        v_max (km s⁻¹)  v_flat/v_max  v_fid,circ (km s⁻¹)  v_fid,rot/v_fid,circ
1      AP-L1-V11-3-0    118             0.96          103                  0.89
1      AP-L1-V1-4-0      91             0.97           73                  0.89
1      AP-L1-V4-8-0      69             0.97           48                  0.75
2      AP-L1-V6-12-0     76             0.95           63                  0.75
2      AP-L1-V6-8-0      76             0.91           57                  0.86
2      AP-L1-V1-8-0      68             0.95           52                  0.99
2      AP-L1-V6-5-0      89             1.07           76                  0.84
2      AP-L1-V10-6-0    103             0.81           80                  0.73
2      AP-L1-V6-19-0     61             0.93           45                  1.09
2      AP-L1-V11-6-0     88             0.93           71                  0.54
2      AP-L1-V10-14-0    65             0.81           50                  0.84
2      AP-L1-V4-10-0     66             0.88           45                  0.62
3      AP-L1-V4-6-0      86             0.91           78                  0.77
3      AP-L1-V11-5-0     91             0.79           69                  0.79
3      AP-L1-V1-7-0      72             0.93           62                  0.83
3      AP-L1-V4-14-0     60             1.09           43                  1.04
3      AP-L1-V6-7-0      68             0.82           43                  0.65
3      AP-L1-V10-30-0    61             0.88           48                  0.75
3      AP-L1-V6-16-0     65             0.94           54                  0.68
4      AP-L1-V6-20-0     68             0.91           59                  0.98
4      AP-L1-V6-18-0     62             0.78           55                  0.65
4      AP-L1-V10-19-0    67             1.01           47                  0.71
4      AP-L1-V4-13-0     65             0.92           52                  0.59
4      AP-L1-V6-15-0     62             0.88           44                  0.58
4      AP-L1-V10-22-0    65             0.75           47                  0.48
4      AP-L1-V6-6-0      67             1.05           46                  0.84
4      AP-L1-V10-16-0    75             0.92           53                  0.92
4      AP-L1-V10-20-0    73             1.02           63                  0.41
4      AP-L1-V10-5-0    109             0.97           97                  0.90
4      AP-L1-V1-6-0      60             0.62           44                  0.64
4      AP-L1-V10-17-0    65             0.60           47                  0.39
4      AP-L1-V6-11-0     60             0.96           48                  0.08
4      AP-L1-V10-13-0    84             0.60           77                  0.57

3.3.2 The v_fid − v_max relation

Santos-Santos et al. (2020) adapted a relation introduced by Oman et al. (2015) that relates the maximum rotation speed (or circular velocity) and the rotation speed (or circular velocity) at an inner radius, v_fid = v(r_fid). The radius r_fid ≡ (v_max/70 km s⁻¹) 2 kpc is defined to adapt to the scale of each galaxy. This quantifies the shape of the rotation curve or circular velocity curve: a more slowly rising curve (i.e. a rotation curve with a shallow inner slope) has a lower v_fid at a given v_max than a more steeply rising curve.
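To illustrate the definition just given, a minimal sketch of evaluating v_fid from a tabulated curve might look like this; the array names and the use of linear interpolation are assumptions of the sketch, not details taken from the paper.

```python
import numpy as np

def v_fiducial(radii_kpc, v_kms, v_max_kms):
    """Evaluate a rotation or circular velocity curve at the fiducial radius.

    r_fid = 2 kpc * (v_max / 70 km/s), so the fiducial radius scales with
    the velocity scale of each galaxy.
    """
    r_fid = 2.0 * (v_max_kms / 70.0)            # in kpc
    return np.interp(r_fid, radii_kpc, v_kms)   # curve value at r_fid
```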
We plot this relation for our sample of simulated galaxies in the upper panel of Fig. 10, here using v_fid,circ and v_max measured from their circular velocity curves. We also plot measurements from the compilation of Santos-Santos et al. (2020) for context (see Oman et al. 2019; Santos-Santos et al. 2020; Roper et al. 2022, for further discussion of the comparison). In the second panel, we plot the locations of our sample of galaxies in the same space, but measured from their z = 0 rotation curves (v_flat is measured as in Fig. 9). The values corresponding to each plotted point for all galaxies in our sample are tabulated in Table 3.

In addition to the tendency for rotation curves to underestimate the maximum circular velocity as discussed above, 29 of the 33 galaxies in our sample have a significantly lower rotation velocity than circular velocity at r_fid, with the effect being severe for most class 4 galaxies. In some cases the shape of the rotation curve is broadly preserved (displacements parallel to the solid grey line in the second panel), while in others the rotation curve rises much more slowly (vertical displacement downwards). Interestingly, the resulting scatter in the space of v_fid − v_flat is not dissimilar from that observed for the SPARC galaxies, with even some similarly extreme outliers. We caution, however, that in practice rotation curve measurements do not recover the median azimuthal velocity as a function of radius exactly but are subject to various systematic errors in modelling, especially in their central regions (Oman et al. 2019). The scatter in the lower panel of Fig. 10 is therefore likely a lower bound on what would be obtained were these simulated galaxies 'observed' and modelled analogously to real galaxies – as is confirmed by Oman et al. (2019) for a subset of the galaxies in our sample.
The discrepancy between the rotation curves of low-mass galaxies and their circular velocity curves may be a significant contributor to the diversity in the shapes of observed dwarf galaxy rotation curves highlighted by Oman et al. (2015).

4 CONCLUSIONS

4.1 Summary

That the cold gas in some observed galaxies is out of equilibrium and is therefore a poor dynamical mass tracer is well known. However, just how rare it may be that an atomic gas rotation curve can reasonably be interpreted as a circular velocity curve has not previously been systematically explored. Our visualisations of the gas kinematics of low-mass APOSTLE galaxies (60 < v_max/km s⁻¹ < 120) over the past ∼ 4 Gyr emphasize the wide variety of processes perturbing them. Only about a third (12/33) of the galaxies in our sample have rotation curves that we would describe as similar to their circular velocity curves, with examples of close matches being rarer still (3/33). These are found at preferentially higher gas masses (M_gas ≳ 1.5 × 10⁹ M⊙). Based on our visual inspection of galaxies and their recent history, the most frequent types of perturbations include:

• Mergers and interactions with gas-rich companion galaxies (6/33).
• Bulk radial gas inflows, likely driven by accretion (19/33), and vertical gas outflows, likely driven by supernovae (15/33).
• Prolate or triaxial DM halo shapes (17/33).
• Warps (8/33).
• Winds due to motion through the IGM (5/33).
The fractions in parentheses indicate the fraction of galaxies in our sample that exceed the thresholds for 'strong perturbations' of the given type outlined in Secs. 3.2.1–3.2.5 (entries in bold face in Table 1). Only 5/33 galaxies in our sample avoid 'strong' perturbations in all of these categories at z = 0. Some of these types of perturbations (e.g. mergers) are readily identified observationally, such that the galaxy in question can be excluded from samples for kinematic analysis, but others (e.g. IGM wind, influence of a triaxial DM halo) are much more subtle. Furthermore, because susceptibility to perturbation correlates with galaxy properties such as total cold gas mass, omitting perturbed galaxies from analyses introduces biases. In particular, we find that this has probably led to an underestimate of the low-velocity slope of the baryonic Tully-Fisher relation, offering a straightforward explanation for the steeper slope for gas-rich galaxies found by Papastergis et al. (2016). Whether our findings based on the APOSTLE simulations are applicable to observed galaxies depends on how faithfully the simulations capture the relevant physical processes.
Whether our findings based on the APOSTLE simulations are applicable to observed galaxies depends on how faithfully the simulations capture the relevant physical processes. Of the main categories of perturbations that we see operating in the simulations, we would characterize only one (supernova-driven outflows) as sensitively dependent on modelling choices in which there is significant ambiguity. Other processes, like the merger rate or the shapes of DM haloes, are natural consequences of structure formation in a ΛCDM cosmology, and depend on physics that is well understood and straightforward to implement in the models. There is only a single galaxy in our sample that we have flagged as having strong vertical outflows but no other strong perturbations at 𝑧 = 0. Our main conclusion, that a majority of galaxies in the 𝑣max range of our sample have rotation curves that differ significantly from their circular velocity curves, is therefore probably also applicable to real low-mass galaxies, though confirming this with other cosmological hydrodynamical galaxy formation models would reinforce it. Galaxies with non-equilibrium gas kinematics are therefore likely one of the main drivers of the observed kinematic diversity in dwarfs (as highlighted by Oman et al. 2015).

4.2 Reflections on visualisation-driven analysis

Our starting point for all of the analysis presented above was our collection of galaxy evolution visualisations and their circular velocity and rotation curves at the corresponding times. This allowed us to build a strong intuition for the perturbations affecting the galaxies in our sample. The visualisations highlight the diversity and complexity of these low-mass galaxies in a way that cannot be fully captured by integrated properties (such as those in Figs. 4–7) and provided important context for our more quantitative analysis. We can identify several instances where we would probably have reached qualitatively different conclusions if the visualisations were not available to guide our intuition and analysis.
The sheer wealth of information represented by the visualisations and rotation curves eventually motivated our choice to focus our analysis on the current time (𝑧 = 0). The gas discs of essentially every galaxy in our sample have been subject to different perturbative processes at different times. The time dimension of our data set remains largely unexplored, offering an interesting avenue for future work. The human eye is an exceptionally powerful tool for reducing complex visual information to simple patterns and trends. Visualisation-driven analysis of cosmological hydrodynamical simulations has, in our opinion, a largely untapped potential to advance our understanding of a wide variety of physical processes in galaxies.

ACKNOWLEDGEMENTS

We thank I. Santos-Santos, A. Ponomareva, A. Fattahi and J. Navarro for invaluable comments on an early draft of this work. KAO acknowledges support by the European Research Council (ERC) through an Advanced Investigator grant to C. S. Frenk, DMIDAS (GA 786910), and by STFC through grant ST/T000244/1. ERD was supported by a Durham Physics Developing Talent Award to K. A. Oman. This work used the DiRAC@Durham facility managed by the Institute for Computational Cosmology on behalf of the STFC DiRAC HPC Facility (www.dirac.ac.uk). The equipment was funded by BEIS capital funding via STFC capital grants ST/K00042X/1, ST/P002293/1, ST/R002371/1 and ST/S002502/1, Durham University and STFC operations grant ST/R000832/1. DiRAC is part of the National e-Infrastructure. This work has made use of NASA’s Astrophysics Data System.

DATA AVAILABILITY

The SPARC data are available at https://cdsarc.cds.unistra.fr/viz-bin/cat/J/AJ/152/157, with supplementary data tabulated in Santos-Santos et al. (2020), table A1. Access to the APOSTLE simulation data is available on reasonable request to the corresponding author. Basic properties of the galaxies in our sample are tabulated in Oman et al. (2019), table A1.
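As a practical aside (not part of the paper), the SPARC catalogue pointed to above can be retrieved programmatically from VizieR using the identifier in the URL. The following is a minimal sketch assuming the third-party astroquery package is installed; which of the returned tables holds the galaxy sample is an assumption to be checked against the printed summary.

# Minimal sketch: fetch the SPARC catalogue (VizieR identifier J/AJ/152/157,
# taken from the Data Availability section above). Requires `astroquery`.
from astroquery.vizier import Vizier

vizier = Vizier(row_limit=-1)                 # lift the default 50-row limit
tables = vizier.get_catalogs("J/AJ/152/157")  # returns a TableList of astropy Tables

print(tables)                                 # summary of the tables in the catalogue
galaxies = tables[0]                          # first table; assumed to hold the galaxy sample
print(galaxies.colnames)                      # inspect the available columns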
REFERENCES

Ball C. J., Haynes M. P., Jones M. G., Peng B., Durbala A., Koopmann R. A., Ribaudo J., O’Donoghue A., 2022, arXiv e-prints, p. arXiv:2212.08728
Benítez-Llambay A., 2015, py-sphviewer: Py-SPHViewer v1.0.0, doi:10.5281/zenodo.21703, http://dx.doi.org/10.5281/zenodo.21703
Benítez-Llambay A., Frenk C. S., Ludlow A. D., Navarro J. F., 2019, MNRAS, 488, 2387
Binney J., Tremaine S., 2008, Galactic Dynamics: Second Edition. Princeton University Press
Blitz L., Rosolowsky E., 2006, ApJ, 650, 933
Bose S., et al., 2019, MNRAS, 486, 4790
Bosma A., 1981, AJ, 86, 1825
Bradford J. D., Geha M. C., van den Bosch F. C., 2016, ApJ, 832, 11
Brook C. B., Santos-Santos I., Stinson G., 2016, MNRAS, 459, 638
Brooks A. M., Papastergis E., Christensen C. R., Governato F., Stilp A., Quinn T. R., Wadsley J., 2017, ApJ, 850, 97
Bullock J. S., Boylan-Kolchin M., 2017, ARA&A, 55, 343
Chan T. K., Kereš D., Oñorbe J., Hopkins P. F., Muratov A. L., Faucher-Giguère C. A., Quataert E., 2015, MNRAS, 454, 2981
Crain R. A., et al., 2015, MNRAS, 450, 1937
Creasey P., Sameie O., Sales L. V., Yu H.-B., Vogelsberger M., Zavala J., 2017, MNRAS, 468, 2283
Dalla Vecchia C., Schaye J., 2012, MNRAS, 426, 140
Davis M., Efstathiou G., Frenk C. S., White S. D. M., 1985, ApJ, 292, 371
Desmond H., 2012, arXiv e-prints, p. arXiv:1204.1497
Di Cintio A., Brook C. B., Macciò A. V., Stinson G. S., Knebe A., Dutton A. A., Wadsley J., 2014, MNRAS, 437, 415
Dolag K., Borgani S., Murante G., Springel V., 2009, MNRAS, 399, 497
Fattahi A., et al., 2016, MNRAS, 457, 844
Flores R. A., Primack J. R., 1994, ApJ, 427, L1
Frosst M., Courteau S., Arora N., Stone C., Macciò A. V., Blank M., 2022, MNRAS, 514, 3510
Gogate A. R., Verheijen M. A. W., van der Hulst J. M., Jaffé Y. L., 2022, MNRAS, in press
Haardt F., Madau P., 2001, in Neumann D. M., Tran J. T. V., eds, Clusters of Galaxies and the High Redshift Universe Observed in X-rays. p. 64 (arXiv:astro-ph/0106018)
Hall M., Courteau S., Dutton A. A., McDonald M., Zhu Y., 2012, MNRAS, 425, 2741
Hayashi E., Navarro J. F., 2006, MNRAS, 373, 1117
Helly J. C., Cole S., Frenk C. S., Baugh C. M., Benson A., Lacey C., 2003, MNRAS, 338, 903
Hopkins P. F., 2013, MNRAS, 428, 2840
Jahn E. D., et al., 2021, arXiv e-prints, p. arXiv:2110.00142
Kaplinghat M., Ren T., Yu H.-B., 2020, J. Cosmology Astropart. Phys., 2020, 027
Komatsu E., et al., 2011, ApJS, 192, 18
Lelli F., 2022, Nature Astronomy, 6, 35
Lelli F., McGaugh S. S., Schombert J. M., 2016a, AJ, 152, 157
Lelli F., McGaugh S. S., Schombert J. M., 2016b, ApJ, 816, L14
Lelli F., McGaugh S. S., Schombert J. M., Desmond H., Katz H., 2019, MNRAS, 484, 3267
Mancera Piña P. E., et al., 2019, ApJ, 883, L33
McGaugh S. S., 2012, AJ, 143, 40
McGaugh S. S., Schombert J. M., Bothun G. D., de Blok W. J. G., 2000, ApJ, 533, L99
Moore B., 1994, Nature, 370, 629
Navarro J. F., Eke V. R., Frenk C. S., 1996a, MNRAS, 283, L72
Navarro J. F., Frenk C. S., White S. D. M., 1996b, ApJ, 462, 563
Oman K. A., et al., 2015, MNRAS, 452, 3650
Oman K. A., Navarro J. F., Sales L. V., Fattahi A., Frenk C. S., Sawala T., Schaller M., White S. D. M., 2016, MNRAS, 460, 3610
Oman K. A., Marasco A., Navarro J. F., Frenk C. S., Schaye J., Benítez-Llambay A., 2019, MNRAS, 482, 821
Papastergis E., Giovanelli R., Haynes M. P., Shankar F., 2015, A&A, 574, A113
Papastergis E., Adams E. A. K., van der Hulst J. M., 2016, A&A, 593, A39
Ponomareva A. A., Verheijen M. A. W., Papastergis E., Bosma A., Peletier R. F., 2018, MNRAS, 474, 4366
Pontzen A., Governato F., 2012, MNRAS, 421, 3464
Pontzen A., Governato F., 2014, Nature, 506, 171
Power C., Navarro J. F., Jenkins A., Frenk C. S., White S. D. M., Springel V., Stadel J., Quinn T., 2003, MNRAS, 338, 14
Rahmati A., Pawlik A. H., Raičević M., Schaye J., 2013, MNRAS, 430, 2427
Read J. I., Gilmore G., 2005, MNRAS, 356, 107
Read J. I., Iorio G., Agertz O., Fraternali F., 2016, MNRAS, 462, 3628
Ren T., Kwa A., Kaplinghat M., Yu H.-B., 2019, Physical Review X, 9, 031020
Roper F. A., Oman K. A., Frenk C. S., Benítez-Llambay A., Navarro J. F., Santos-Santos I. M. E., 2022, arXiv e-prints, p. arXiv:2203.16652
Rubin V. C., Ford W. K. J., Thonnard N., 1980, ApJ, 238, 471
Sales L. V., et al., 2017, MNRAS, 464, 2419
Sales L. V., Wetzel A., Fattahi A., 2022, Nature Astronomy, 6, 897
Santos-Santos I. M. E., et al., 2020, MNRAS, 495, 58
Sawala T., et al., 2016, MNRAS, 457, 1931
Schaye J., 2004, ApJ, 609, 667
Schaye J., Dalla Vecchia C., 2008, MNRAS, 383, 1210
Schaye J., et al., 2015, MNRAS, 446, 521
Sellwood J. A., Spekkens K., Eckel C. S., 2021, MNRAS, 502, 3843
Sorce J. G., Guo Q., 2016, MNRAS, 458, 2667
Spergel D. N., Steinhardt P. J., 2000, Phys. Rev. Lett., 84, 3760
Springel V., White S. D. M., Tormen G., Kauffmann G., 2001, MNRAS, 328, 726
Tollet E., et al., 2016, MNRAS, 456, 3542
Trujillo-Gomez S., Klypin A., Primack J., Romanowsky A. J., 2011, ApJ, 742, 16
Tulin S., Yu H.-B., 2018, Phys. Rep., 730, 1
de Blok W. J. G., 2010, Advances in Astronomy, 2010, 789293
Valenzuela O., Rhee G., Klypin A., Governato F., Stinson G., Quinn T., Wadsley J.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', 2007, ApJ, 657, 773 Verbeke R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Papastergis E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Ponomareva A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Rathi S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', de Rijcke S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', 2017, A&A, 607, A13 Wiersma R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Schaye J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Smith B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', 2009a, MNRAS, 393, 99 Wiersma R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Schaye J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Theuns T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Dalla Vecchia C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', Tornatore L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', 2009b, MNRAS, 399, 574 Wingfield McQuinn K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', 2022, arXiv e-prints, p.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' arXiv:2203.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='10105 Zaritsky D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=', 2014, AJ, 147, 134 APPENDIX A: GALAXY VIDEOS, CIRCULAR VELOCITY AND ROTATION CURVES We include as supplementary material a collection of mp4 video files for each galaxy in our sample showing different views of their evo- lution over the past 4 Gyr ({AP-ID} is substituted with the identifier of each galaxy, such as AP-L1-V6-5-0): MNRAS 000, 1–15 (2023) Rotation curves of low-mass galaxies 15 {AP-ID}-composite-edge-and-face.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='mp4 Side-by-side views of the galaxy seen face-on and edge-on, with a composite image of the projected DM density (grey scale) and gas density (purple-yellow colour).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' {AP-ID}-gas-edge-and-face.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='mp4 Side-by-side views of the galaxy seen face-on and edge-on, showing the projected gas density.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' {AP-ID}-face-gas-and-dm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='mp4 Side-by-side views of the galaxy seen face-on, in projected DM density (left) and gas density (right).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' {AP-ID}-edge-gas-and-dm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='mp4 Side-by-side views of the galaxy seen edge-on, in projected DM density (left) and gas density (right).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Details of the creation of these visualisations is given in Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' We note that in some cases the orientation of the camera is arbitrary in the initial frames of the videos – this is due to the angular momentum of the gas disc not being evaluated until the first snapshot (8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='94 Gyr) after the start time (8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='88 Gyr).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' In addition, we include a file {AP-ID}-rotation-curves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='pdf with a page showing at the time of each simulation snapshot: A plot showing the circular velocity curve (purple), and the median azimuthal velocity of atomic gas (orange) particles at the labelled time of the snapshot, measured as described in Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The face-on (left) and edge-on (right) gas density images of the galaxy, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' matching those in {AP-ID}-gas-edge-and-face.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='mp4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Examples for a single galaxy are available on arXiv as ancillary files and can also be found at: http://icc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='dur.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='uk/~txwx36/ share/DowningOman2023_supplementary/AP-L1-V6-5-0/ The complete collection of supplementary material can be downloaded from http://icc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='dur.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='uk/~txwx36/share/ DowningOman2023_supplementary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='tar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='gz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' This paper has been typeset from a TEX/LATEX file prepared by the author.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 109 1010 Mbar = M⋆ + 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='33MHI [M⊙] vmax SPARC (Lelli et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016a) Lelli et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2016b) not currently interacting ongoing interaction with companion 109 1010 Mbar = M⋆ + 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='33MHI [M⊙] vflat fit (vmax) fit (vflat) 100 40 50 60 70 80 90 vmax or vflat [km s−1] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='9 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1 vflat/vmax vmax class 1 class 2 class 3 class 4 Figure 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The baryonic Tully-Fisher relation (BTFR).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Upper panel: Baryonic mass against maximum circular velocity, 𝑣max.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Point colours and open/filled symbols are as in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The solid black line is a linear fit to the open symbols (see Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1 for details).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The BTFR fit from Lelli et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2016b) is plotted with a thin solid grey line, and data from the SPARC compilation (Lelli et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2016a) as small grey points.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The areas outside of our selection 60 < 𝑣max/km s−1 < 120 are shaded in light grey.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Centre panel: As upper panel, but with the measured maximum gas rotation velocity, 𝑣flat, on the horizontal axis (see Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='1 for details of how 𝑣flat is measured).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The points are joined to their respective locations in the upper panel by a horizontal line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The fit line from the upper panel is repeated, and a fit to the filled points in this panel is shown with a dashed black line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Lower panel: The ratio 𝑣flat/𝑣max, plotted against 𝑣max.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The maximum gas rotation velocity systematically underestimates the maximum circular velocity (𝑣flat/𝑣max < 1), and the underestimates get systematically worse at lower 𝑣max;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' this has the potential to bias both the normalisation and the slope of the BTFR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' MNRAS 000, 1–15 (2023) 16 E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Downing & K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Oman 100 20 30 40 50 60 70 80 90 vfid,circ [km s−1] vmax 1:1 NFW (vfid ∼ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='65vmax) Santos-Santos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2020) not currently interacting ongoing interaction with companion 100 20 30 40 50 60 70 80 90 vfid,rot [km s−1] vflat class 1 class 2 class 3 class 4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='50 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='75 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='00 vflat/vmax vmax 100 40 50 60 70 80 90 vmax or vflat [km s−1] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='75 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='00 vfid,rot/vfid,circ vmax Figure 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Upper panel: The circular velocity measured at an inner, ‘fiducial radius’ 𝑟fid (see Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content='2) is plotted against the maximum circular velocity, 𝑣max.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Lower central densities correspond to lower 𝑣fid,circ at fixed 𝑣max.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The relation for an NFW density profile (Santos-Santos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 2020) is shown with a grey solid line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Data from the compilation of Santos-Santos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' (2020) are plotted with small grey points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Point colours and open/filled symbols are as in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' The areas outside of our selection 60 < 𝑣max/km s−1 < 120 are shaded in light grey.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Second panel: As upper panel, but the inner and outer rotation velocities 𝑣fid,rot and 𝑣flat are measured from the 𝑧 = 0 rotation curve.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Points are joined by a line to their positions in the upper panel.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Third panel: Ratio of the outer rotation velocity measured from the rotation curve and that measured from the circular velocity curve 𝑣flat/𝑣max, plotted against 𝑣max.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Point colours and open/filled symbols as in upper panels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Lower panel: Ratio of the inner rotation velocity measured from the rotation curve and that measured from the circular velocity curve 𝑣fid,rot/𝑣fid,circ, plotted against 𝑣max.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' Point colours and open/filled symbols as in upper panels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} +page_content=' MNRAS 000, 1–15 (2023)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/p9E4T4oBgHgl3EQfvg1C/content/2301.05242v1.pdf'} diff --git a/ptAyT4oBgHgl3EQfzflz/content/2301.00702v1.pdf b/ptAyT4oBgHgl3EQfzflz/content/2301.00702v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..08912416c0e6178a9937bbd41ee65c2390fdc8da --- /dev/null +++ b/ptAyT4oBgHgl3EQfzflz/content/2301.00702v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d82a2dd88cbe410122403c64212eb9bca51ddcfc83081d762360ac3fb47ebc7a +size 1098084 diff --git a/qdE1T4oBgHgl3EQfPgMa/vector_store/index.pkl b/qdE1T4oBgHgl3EQfPgMa/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..16aae11d86bc43467aa58e78a82b8da10ab2d428 --- /dev/null +++ b/qdE1T4oBgHgl3EQfPgMa/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5dbf30754270b478ca85a593dad73f852835168cf2663bf47f16447f20b97ad6 +size 136892 diff --git a/qdE2T4oBgHgl3EQf0gjz/vector_store/index.faiss b/qdE2T4oBgHgl3EQf0gjz/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..33e0eec0ff08e3edc1f8126ef101594a0262c505 --- /dev/null +++ b/qdE2T4oBgHgl3EQf0gjz/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e50999255461306c8f860bdf2b17ddb6c7df9326ceade3c885d5b7e8191dcf5 +size 1376301 diff --git a/rdE1T4oBgHgl3EQf2wXf/vector_store/index.pkl b/rdE1T4oBgHgl3EQf2wXf/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b4139d3fd9329a6484277f9e41e7e21c51c03d4d --- /dev/null +++ b/rdE1T4oBgHgl3EQf2wXf/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b04c99dd51c139bff55ef82b729a9927000855cf4653bcc7542b0308cbf1b24c +size 102669 diff --git a/rdE3T4oBgHgl3EQfMgkz/vector_store/index.faiss b/rdE3T4oBgHgl3EQfMgkz/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..45c1ed362964607e7785f674eb62415e70cddbd4 --- /dev/null +++ b/rdE3T4oBgHgl3EQfMgkz/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5188025d11dccffb325875e90b1475567fba9b19f0758c4d07f9ea58c56fdf0 +size 20119597 diff --git a/stE_T4oBgHgl3EQf8xx6/content/2301.08377v1.pdf b/stE_T4oBgHgl3EQf8xx6/content/2301.08377v1.pdf new file mode 100644 
index 0000000000000000000000000000000000000000..001a695759263d30184ed4f23a33fae95ed0810d --- /dev/null +++ b/stE_T4oBgHgl3EQf8xx6/content/2301.08377v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3ee705bd412d86dfc7cf6eb7f42b73dcfeea0dbb0667c872c9526db196ebeb9 +size 625018 diff --git a/stE_T4oBgHgl3EQf8xx6/vector_store/index.pkl b/stE_T4oBgHgl3EQf8xx6/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..3c7531cafaf31c51a16d3ff78ad9582d7f0c2bd5 --- /dev/null +++ b/stE_T4oBgHgl3EQf8xx6/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8c07c6046fe9c0c3bc4fc5da6c924469d3403e564208ec98aab2f6e04b817dc +size 204853 diff --git a/stFKT4oBgHgl3EQf1y5_/content/2301.11921v1.pdf b/stFKT4oBgHgl3EQf1y5_/content/2301.11921v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..dff7eb59bf71c54181c3cf76f7276aee8f3dad42 --- /dev/null +++ b/stFKT4oBgHgl3EQf1y5_/content/2301.11921v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d04cad42cec4e733390ea5edcb89fc8efbbf56ca12a77285632922c0f40e2a1 +size 583423 diff --git a/stFKT4oBgHgl3EQf1y5_/vector_store/index.faiss b/stFKT4oBgHgl3EQf1y5_/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..a5a022cd6d9d2f7eeabf122223ee8bff933c619a --- /dev/null +++ b/stFKT4oBgHgl3EQf1y5_/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5cfde8f54c486afea6f5f0a1635cbe5623dc2b1b05f7f8d29daa3b2fa5f268b +size 1769517 diff --git a/stFKT4oBgHgl3EQf1y5_/vector_store/index.pkl b/stFKT4oBgHgl3EQf1y5_/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..cb9ab79d1d0a8e48e1db1b5c9e6432e74ff28c3e --- /dev/null +++ b/stFKT4oBgHgl3EQf1y5_/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0b4919744573b66d214ed9c3e180ec1d503c9c44cc98b60a230bea483e2da46 +size 66530 diff --git a/ttAzT4oBgHgl3EQfBfrk/content/tmp_files/2301.00945v1.pdf.txt b/ttAzT4oBgHgl3EQfBfrk/content/tmp_files/2301.00945v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..76c33f43b5c77640357b1e34f0d1e370a4c04135 --- /dev/null +++ b/ttAzT4oBgHgl3EQfBfrk/content/tmp_files/2301.00945v1.pdf.txt @@ -0,0 +1,650 @@ +arXiv:2301.00945v1 [cs.IT] 3 Jan 2023 +On Euclidean, Hermitian and symplectic quasi-cyclic complementary dual codes +Chaofeng Guana,b,c, Ruihu Lia, Zhi Mab,c,∗ +aFundamentals Department, Air Force Engineering University, Xi’an, 710051, China. +bState Key Laboratory of Mathematical Engineering and Advanced Computing, Zhengzhou, 450001, China. +cHenan Key Laboratory of Network Cryptography Technology, Zhengzhou, 450001, China. +Abstract +Linear complementary dual codes (LCD) are codes that intersect trivially with its dual. LCD codes have recently +become a popular topic due to their applications in data storage, communication systems, and cryptography. In +this paper, we propose a new equivalence definition for LCD codes, which allows us to judge the complementary +duality of linear codes from the codeword level. Further, we determine the necessary and sufficient conditions for +quasi-cyclic codes to be LCD codes involving Euclidean, Hermitian, and symplectic inner products. Finally, we give +several examples demonstrating that quasi-cyclic codes can be utilized to construct good Euclidean, Hermitian, and +symplectic LCD codes. 
Keywords: quasi-cyclic codes, complementary dual codes, necessary and sufficient conditions, Euclidean, Hermitian, symplectic

∗ Corresponding author. Email addresses: gcf2020yeah.net (Chaofeng Guan), liruihu@aliyun.com (Ruihu Li), ma_zhi@163.com (Zhi Ma). Preprint submitted to Elsevier, January 4, 2023.

1. Introduction
Linear complementary dual (LCD) codes intersect their dual codes trivially. LCD codes have been used extensively in data storage, communication systems, consumer electronics, and cryptography. In [1], Massey showed that LCD codes provide an optimal linear coding scheme for a two-user binary adder channel. In [2], Carlet et al. studied the application of binary LCD codes in countering side-channel attacks and fault injection attacks and proposed several LCD code constructions. Moreover, it was shown in [3] that LCD codes satisfy the Gilbert-Varshamov bound. Due to these important applications, much research has been conducted on LCD codes [4, 5, 6, 7, 8, 9].
Notably, Carlet et al. [10] showed that any code over Fq is equivalent to some Euclidean LCD code for q ≥ 4, and any code over Fq2 is equivalent to some Hermitian LCD code for q ≥ 3. Therefore, most research on LCD codes is currently focused on small fields. Bouyuklieva [11], Harada [12], Ishizuka et al. [13, 14], Li et al. [15], and Liu et al. [16] constructed many good binary and ternary Euclidean LCD codes and quaternary Hermitian LCD codes, and they have established several tables of LCD codes of short lengths. In addition, Shi et al. [17] introduced additive complementary dual (ACD) codes, which still make sense for such security applications. By [18], the symplectic inner product and the trace Hermitian inner product are equivalent, so a symplectic LCD code over F_q^{2n} is equivalent to a trace Hermitian LCD code over F_{q^2}^{n}. Therefore, the construction of symplectic LCD codes is also of significant importance. Xu et al. [19] constructed a class of symplectic LCD MDS codes by employing Vandermonde matrices. In [20], Huang et al. constructed some good low-dimensional symplectic LCD codes over F_{2^r}.
In [21], Yang and Massey provided a necessary and sufficient condition under which a cyclic code has a Euclidean complementary dual. Esmaeili et al. [22] studied a sufficient condition for h-generator quasi-cyclic codes to be Euclidean LCD codes and gave a method for constructing quasi-cyclic Euclidean LCD codes. In [23], Güneri et al. characterized and studied quasi-cyclic codes with Euclidean and Hermitian complementary duals employing their concatenation structure and presented a method for constructing quasi-cyclic Euclidean and Hermitian LCD codes from codes over larger alphabets. Later on, Saleh et al. [24] studied quasi-twisted codes of specific lengths and proposed some sufficient conditions for h-generator quasi-twisted codes to be Euclidean LCD. However, many important issues remain regarding developing LCD codes from quasi-cyclic codes. One of the most critical is determining the sufficient and necessary conditions for quasi-cyclic codes to be LCD codes, so that quasi-cyclic LCD codes can be constructed more efficiently.
The main goal of this paper is to investigate quasi-cyclic Euclidean, Hermitian, and symplectic LCD codes. We first ascertain the sufficient and necessary conditions for quasi-cyclic codes to be Euclidean, Hermitian, and symplectic LCD codes. More precisely, we answer the following two questions:
1, What polynomials can be applied to construct quasi-cyclic LCD codes?
+2, How to use polynomials to construct quasi-cyclic LCD codes? +Firstly, we give an equivalent definition of an LCD code that allows the treatment of LCD codes at the codeword +level. Then, by decomposing the codeword space of quasi-cyclic code, we obtain the sufficient and necessary condi- +tions for quasi-cyclic codes to intersect their dual trivially under Euclidean, Hermitian, and symplectic inner products. +Finally, we present a practical method for constructing LCD codes using quasi-cyclic codes and give some examples +of good quasi-cyclic Euclidean, Hermitian, and symplectic LCD codes. +The paper is organized as follows. Section 2 gives preliminaries and background on quasi-cyclic codes, Euclidean, +Hermitian, and symplectic LCD codes. In Section 3 and 4, we redescribe LCD codes in terms of codewords and iden- +tify sufficient and necessary conditions for the quasi-cyclic codes to be LCD codes under Euclidean, Hermitian, and +symplectic inner products, respectively. In Section 5, we give several examples to illustrate the method of constructing +quasi-cyclic Euclidean, Hermitian, and symplectic LCD codes. Finally, we give concluding remarks in Section 6. All +calculations in this paper are done with the algebraic computer system Magma [25]. +2. Preliminaries +In this section, we introduce some basic concepts of quasi-cyclic codes, Euclidean, Hermitian, and symplectic +LCD codes to facilitate the unfolding of subsequent sections. For more details, we refer the reader to the standard +handbook [26, 27]. +2.1. Basics of linear codes +Throughout this paper, p is a prime, and Fq is the finite field of order q, where q = pr for some positive integer +r. A [ℓn, k]q linear code C over Fq is a linear subspace of Fℓn +q with dimension k. Let ⃗u = (u0, u1, . . ., uℓn−1) ∈ C , +then Hamming weight of ⃗u is wH(⃗u) = #{i | ui � 0, 0 ≤ i ≤ ℓn − 1}. If minimum Hamming distance of C is +dH = min{wH(⃗u) | ⃗u ∈ C \ {0}}, then C can be written as [ℓn, k, dH]q. If ℓ is even, let N = ℓn/2, then symplectic +weight of ⃗u is ws(⃗u) = #{i | (ui, uN+i) � (0, 0), 0 ≤ i ≤ N − 1}. Analogously, if minimum symplectic weight of C is +ds(C ) = min{ws(⃗u) | ⃗u ∈ C \ {0}}, then we denote C as [ℓn, k, ds]s +q. +The Euclidean inner product of ⃗x = (x0, . . . , xℓn−1), ⃗y = (y0, . . . , yℓn−1) ∈ Fℓn +q is defined as: +⟨⃗x,⃗y⟩e = +ℓn +� +i=1 +xiyi. +(1) +Similarly, the Hermitian inner product of ⃗x,⃗y ∈ Fℓn +q2 is defined as: +⟨⃗x,⃗y⟩h = +ℓn +� +i=1 +xiyq +i . +(2) +If ℓ is even, then symplectic inner product of ⃗x,⃗y ∈ Fℓn +q is: +⟨⃗x,⃗y⟩s = +N−1 +� +i=0 +(xiyN+i − xN+iyi) . +(3) +The Euclidean, Hermitian and symplectic dual codes of C can be separately expressed as C ⊥e = {⃗v ∈ Fℓn +q +| +⟨⃗u,⃗v⟩e = 0, ∀⃗u ∈ C }, C ⊥q = {⃗v ∈ Fℓn +q2 | ⟨⃗u,⃗v⟩h = 0, ∀⃗u ∈ C } and C ⊥s = {⃗v ∈ Fℓn +q +| ⟨⃗u,⃗v⟩s = 0, ∀⃗u ∈ C }. If +C ∩ C ⊥∗ = {0}, then C is an LCD code, where “⊥∗” represents one of Euclidean, Hermitian and symplectic dual. +2 + +2.2. Basics of quasi-cyclic codes +Cyclic codes are a particular type of linear codes that are closed under the right cyclic shift operator τ1. For +⃗x = (x0, x1, . . ., xn−1) ∈ Fn +q, we denote τ1(⃗x) = (xn−1, x0, . . . , xn−2) . If C = τ1(C ) then C is called a cyclic code. Let +R = Fq[x]/ ⟨xn − 1⟩, and define a mapping ϕ1 as follows, +ϕ1 : Fn +q → R +(c0, c1, . . ., cn−1) �→ c0 + c1x + · · · + cn−1xn−1 +(4) +Clearly, ϕ1 is an isomorphism of Fq-modules and a cyclic code C of length n is an ideal of the quotient ring R. 
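To make the notation above concrete, the following minimal Python sketch (an informal illustration with arbitrary toy vectors, not data from this paper) evaluates the Euclidean and symplectic inner products of (1) and (3), the symplectic weight, and the cyclic shift τ1 over the prime field F_2; the Hermitian product (2) is omitted from the sketch only because it conjugates each y_i to y_i^q and therefore needs arithmetic in F_{q^2}. All function names below are ours and are used purely for illustration.

def euclidean_ip(x, y, p=2):
    # <x, y>_e = sum_i x_i * y_i, reduced mod p  (Eq. (1))
    return sum(a * b for a, b in zip(x, y)) % p

def symplectic_ip(x, y, p=2):
    # <x, y>_s = sum_{0 <= i < N} (x_i * y_{N+i} - x_{N+i} * y_i), N = len(x)//2  (Eq. (3))
    N = len(x) // 2
    return sum(x[i] * y[N + i] - x[N + i] * y[i] for i in range(N)) % p

def symplectic_weight(u):
    # ws(u) = number of indices i with (u_i, u_{N+i}) != (0, 0)
    N = len(u) // 2
    return sum(1 for i in range(N) if (u[i], u[N + i]) != (0, 0))

def cyclic_shift(x):
    # tau_1: (x_0, ..., x_{n-1}) -> (x_{n-1}, x_0, ..., x_{n-2})
    return [x[-1]] + x[:-1]

# toy vectors of length 2N = 6 over F_2
x = [1, 0, 1, 1, 0, 1]
y = [0, 1, 1, 1, 1, 0]
print(euclidean_ip(x, y), symplectic_ip(x, y), symplectic_weight(x), cyclic_shift(x))

Two vectors x and y are symplectically orthogonal exactly when symplectic_ip(x, y) returns 0, which is the relation defining the dual code C^⊥s above.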
+Furthermore, a cyclic code C can be generated by a monic divisor g(x) of xn − 1. The polynomial g(x) is called the +generator polynomial of C , and the dimension of C is n−deg(g(x)). Let h(x) = xn −1/g(x) and ˜h(x) = xdeg(h(x))h(x−1), +then Euclidean dual code of C is cyclic code with generator polynomial g⊥e(x) = ˜h(x). Let gq(x) = gq +0+gq +1x+gq +2x+· · ·+ +gq +n−1xn−1. If C is a cyclic codes over Fq2, then Hermitian dual code of C is cyclic code generated by g⊥q(x) = ˜hq(x−1). +Let ⃗x = (x0, x1, . . . , xℓn−1) ∈ Fℓn +q , and τ2(⃗x) = (xn−1, x0, . . . , xn−2, x2n−1, xn, . . ., x2n−2, . . . , xnℓ−1, x(n−1)ℓ, . . . , xnℓ−2). +A linear space C ⊂ Fℓn +q said to be a quasi-cyclic code of index ℓ if C = τ2(C ). Define an Fq -module isomorphism ϕ2 +from Fℓn +q to Rℓ, +ϕ2 : Fℓn +q → Rℓ = R ⊕ R ⊕ · · · ⊕ R +�c0, . . ., cn−1, cn, . . ., c2n−1, . . ., cℓ(n−1), . . ., cℓn−2 +� +�→ (c0(x), c1(x), . . ., cℓ−1(x)) +(5) +where ci(x) = �n−1 +t=0 ct,ixt, i = 0, 1, . . ., ℓ − 1. Algebraically, a quasi-cyclic code C is an R-submodule of Rℓ. A +generator matrix of h-generator quasi-cyclic code with index ℓ has the following form: +M = + +A1,0 +A1,1 +· · · +A1,ℓ−1 +A2,0 +A2,1 +· · · +A2,ℓ−1 +... +... +... +... +Ah,0 +Ah,1 +· · · +Ah,ℓ−1 + +, +(6) +where Ai, j are circulant matrices defined by some polynomials ai, j(x) ∈ R, where 1 ≤ i ≤ h and 0 ≤ j ≤ ℓ − 1. +3. New characterization of complementary dual codes +This section will give a new characterization of LCD codes in terms of codewords, laying the foundation for further +proof. First, we make a convention for the representation of inner products, where “l” denotes one of Euclidean and +Hermitian inner products, and “∗” denotes one of Euclidean, Hermitian, and symplectic products. +Lemma 1. Let C be a linear code over Fq, then C is an LCD code under the inner product “∗” if and only if +∀c1 ∈ C \ {0}, ∃c2 ∈ C , ⟨c1, c2⟩∗ � 0 holds. +Proof. It is obvious that C ∩ C ⊥∗ = {0} is equivalent with ∀c1 ∈ C \ {0}, c1 � C ⊥∗. Moreover, c1 � C ⊥∗ is equivalent +with ∃c2 ∈ C , ⟨c1, c2⟩∗ � 0, so C is LCD equivalent with ∀c1 ∈ C \ {0}, ∃c2 ∈ C , ⟨c1, c2⟩∗ � 0. +For ease of presentation, we give the following definition. +Definition 1. Let C1 and C2 are linear codes over Fq, if the following conditions hold: +∀c1 ∈ C1 \ {0}, ∃c2 ∈ C2, ⟨c1, c2⟩∗ � 0, +∀c2 ∈ C2 \ {0}, ∃c1 ∈ C1, ⟨c1, c2⟩∗ � 0, +(7) +then we call C1 and C2 completely non-orthogonal to each other under inner product “∗”. +Lemma 2. Let C1 and C2 are linear codes over Fq, then C1 and C2 completely non-orthogonal to each other holds if +and noly if C1 ∩ C ⊥∗ +2 += {0} and C ⊥∗ +1 +∩ C2 = {0}. +3 + +Proof. This lemma holds from the definition of dual codes. +Lemma 3. Let C1 and C2 are two cyclic codes , and separately generated by g1(x) and g2(x), where g1(x) | xn −1 and +g2(x) | xn − 1, then C1 and C2 completely Euclidean non-orthogonal to each other is equivalent with g1(x) = ˜g1(x) = +g2(x), and C1 and C2 completely Hermitian non-orthogonal to each other is equivalent with g1(x) = ˜gq +1(x) = g2(x). +Proof. With [26], C1 ∩C ⊥l +2 and C ⊥l +1 ∩C2 are both cyclic codes generated by lcm(g1(x), g⊥l +2 (x)) and lcm(g⊥l +1 (x), g2(x)), +respectively. Further, C1∩C ⊥l +2 += {0} and C ⊥l +1 ∩C2 = {0} yield lcm(g1(x), g⊥l +2 (x)) ≡ 0 (mod xn−1) and lcm(g⊥l +1 (x), g2(x)) ≡ +0 (mod xn − 1). Thus, there are two cases, the first is ˜g2(x) | g1(x) and ˜g1(x) | g2(x) ⇐⇒ g1(x) = ˜g1(x) = g2(x). The +second is ˜gq +2(x) | g1(x) and ˜gq +1(x) | g2(x) ⇐⇒ g1(x) = ˜gq +1(x) = g2(x). 
Hence, we complete the proof.

4. Quasi-cyclic complementary dual codes
In this section, we determine the sufficient and necessary conditions for quasi-cyclic codes to be LCD codes under the Euclidean, Hermitian, and symplectic inner products, starting from Lemma 1.
Some of the symbols used in this paper are described below for ease of expression. Let g(x) = g_0 + g_1x + g_2x^2 + · · · + g_{n−1}x^{n−1} ∈ R, let [g(x)] denote the vector in F_q^n defined by the coefficients of g(x), i.e. [g(x)] = [g_0, g_1, g_2, · · · , g_{n−1}], and let ¯g(x) = x^n g(x^{−1}).
In order to determine the Euclidean inner product between different polynomials in coefficient-vector form, the following two lemmas are crucial.
Lemma 4. ([28]) Let f(x), g(x) and h(x) be polynomials in R. Then the following equation holds for the Euclidean inner product among them:
⟨[f(x)g(x)], [h(x)]⟩_e = ⟨[g(x)], [¯f(x)h(x)]⟩_e.   (8)
Lemma 5. ([29]) Let f(x), g(x) and h(x) be monic polynomials in R. Then the following equality of the Hermitian inner product of vectors in F_{q^2}^n holds:
⟨[f(x)g(x)], [h(x)]⟩_h = ⟨[g(x)], [¯f^q(x)h(x)]⟩_h.
4.1. One-generator complementary dual quasi-cyclic codes
Definition 2. Let g(x) and f_j(x) be monic polynomials in R with g(x) | (x^n − 1), 0 ≤ j ≤ ℓ − 1. If C is the quasi-cyclic code generated by ([g(x)f_0(x)], [g(x)f_1(x)], · · · , [g(x)f_{ℓ−1}(x)]), then C is called a 1-generator quasi-cyclic code with index ℓ. A generator matrix G of C has the following form:
G = (G_0, G_1, · · · , G_{ℓ−1}),   (9)
where the G_j are n × n circulant matrices generated by [g(x)f_j(x)], respectively.
Theorem 6. If C is a 1-generator quasi-cyclic code as in Definition 2, then the sufficient and necessary conditions for C to be a Euclidean LCD code are
g(x) = ˜g(x),   gcd( Σ_{i=0}^{ℓ−1} f_i(x)¯f_i(x), (x^n − 1)/g(x) ) = 1.   (10)
Proof. Suppose a(x) and b(x) are arbitrary polynomials in R; then any two codewords of C can be represented as c_1 = ([a(x)g(x)f_0(x)], [a(x)g(x)f_1(x)], · · · , [a(x)g(x)f_{ℓ−1}(x)]) and c_2 = ([b(x)g(x)f_0(x)], [b(x)g(x)f_1(x)], · · · , [b(x)g(x)f_{ℓ−1}(x)]), respectively. The Euclidean inner product of c_1 and c_2 can be expressed as:
⟨c_1, c_2⟩_e = Σ_{i=0}^{ℓ−1} ⟨[a(x)g(x)f_i(x)], [b(x)g(x)f_i(x)]⟩_e = Σ_{i=0}^{ℓ−1} ⟨[a(x)g(x)f_i(x)¯f_i(x)], [b(x)g(x)]⟩_e = ⟨[a(x)g(x) Σ_{i=0}^{ℓ−1} f_i(x)¯f_i(x)], [b(x)g(x)]⟩_e.
From Lemmas 1 and 3, it is clear that the sufficient and necessary conditions for C to be a Euclidean LCD code are that g(x) = ˜g(x) and gcd( Σ_{i=0}^{ℓ−1} f_i(x)¯f_i(x), (x^n − 1)/g(x) ) = 1, so this theorem is proved.
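As a small computational illustration of Theorem 6 (an informal aid, not part of the proof), the following Python sketch checks the two conditions in (10) over F_2: it represents polynomials as coefficient lists, tests whether g(x) is self-reciprocal, and computes gcd( Σ_i f_i(x)¯f_i(x), (x^n − 1)/g(x) ) with elementary routines. All helper names are ours, no external libraries are used, and the sketch is restricted to q = 2. As sample input it takes the data of Example 1 in Section 5 (n = 13, g(x) = 1, f_0 = 1, f_1 = x^12 + x^7 + x^3 + x + 1, f_2 = x^12 + x^11 + x^9 + x^8 + x^5 + x^3 + x^2), for which Example 1 states that the condition is satisfied.

def trim(p):
    # drop trailing zero coefficients
    while len(p) > 1 and p[-1] == 0:
        p = p[:-1]
    return p

def poly_mul(a, b):
    # product of two binary polynomials (coefficient lists, index = degree)
    out = [0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        if ai:
            for j, bj in enumerate(b):
                out[i + j] ^= bj
    return trim(out)

def poly_mod(a, m):
    # remainder of a modulo m over F_2
    a, m = trim(a[:]), trim(m)
    while len(a) >= len(m) and a != [0]:
        shift = len(a) - len(m)
        for j, mj in enumerate(m):
            a[shift + j] ^= mj
        a = trim(a)
    return a

def poly_div(a, b):
    # quotient of a by b over F_2 (used here only when b divides a)
    a, b = trim(a[:]), trim(b)
    q = [0] * max(1, len(a) - len(b) + 1)
    while len(a) >= len(b) and a != [0]:
        shift = len(a) - len(b)
        q[shift] = 1
        for j, bj in enumerate(b):
            a[shift + j] ^= bj
        a = trim(a)
    return trim(q)

def poly_gcd(a, b):
    a, b = trim(a), trim(b)
    while b != [0]:
        a, b = b, poly_mod(a, b)
    return a

def reciprocal(f, n):
    # bar(f)(x) = x^n * f(1/x) reduced mod x^n - 1
    f = (list(f) + [0] * n)[:n]
    return trim([f[(-k) % n] for k in range(n)])

def is_euclidean_lcd_qc(g, fs, n):
    # Theorem 6 check: g self-reciprocal and gcd(sum_i f_i*bar(f_i), (x^n - 1)/g) = 1
    xn1 = [1] + [0] * (n - 1) + [1]           # x^n + 1 = x^n - 1 over F_2
    g = trim(list(g))
    assert poly_mod(xn1, g) == [0], "g(x) must divide x^n - 1"
    if g != g[::-1]:                          # over F_2 a divisor of x^n - 1 is self-reciprocal iff palindromic
        return False
    h = poly_div(xn1, g)                      # (x^n - 1)/g(x)
    s = [0]
    for f in fs:
        t = poly_mod(poly_mul(list(f), reciprocal(f, n)), xn1)
        size = max(len(s), len(t))
        s = trim([(s + [0] * size)[k] ^ (t + [0] * size)[k] for k in range(size)])
    return poly_gcd(s, h) == [1]

# data of Example 1 (Section 5): n = 13, g = 1, f0 = 1,
# f1 = x^12 + x^7 + x^3 + x + 1, f2 = x^12 + x^11 + x^9 + x^8 + x^5 + x^3 + x^2
f1 = [1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1]
f2 = [0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1]
print(is_euclidean_lcd_qc([1], [[1], f1, f2], 13))

The same skeleton adapts to Theorems 7 and 8 by replacing the polynomial fed to the gcd with Σ_i f_i(x)¯f_i^q(x) or Σ_j ( f_j(x)¯f_{m+j}(x) − f_{m+j}(x)¯f_j(x) ), respectively.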
The symplectic inner product of c1 and c2 can be expressed as: +⟨c1, c2⟩s = +c1 · +� +0 +Imn +−Imn +0 +� +· cT +2 += +m−1 +� +j=0 +⟨[a(x)g(x)f j(x)], [b(x)g(x)fm+ j(x)]⟩e +− +m−1 +� +j=0 +⟨[a(x)g(x)fm+ j(x)], [b(x)g(x)f j(x)]⟩e += +m−1 +� +j=0 +⟨[a(x)g(x)f j(x) ¯fm+ j(x)], [b(x)g(x)]⟩e +− +m−1 +� +j=0 +⟨[a(x)g(x)fm+ j(x) ¯f j(x)], [b(x)g(x)]⟩e += +⟨[a(x)g(x) +m−1 +� +j=0 +(f j(x) ¯fm+ j(x) − fm+ j(x) ¯f j(x))], [b(x)g(x)]⟩e. +From Lemma 1 and 3, it is clear that the sufficient and necessary conditions for C to be symplectic LCD code is +that g(x) = ˜g(x) and gcd( +m−1 +� +j=0 +(f j(x) ¯fm+ j(x) − fm+ j(x) ¯f j(x)), xn−1 +g(x) ) = 1, so this theorem is proved. +4.2. Multi-generator complementary dual quasi-cyclic codes +A multi-generator quasi-cyclic code is a quasi-cyclic code with more than two generators which has a richer +algebraic structure than a 1-generator quasi-cyclic code. The specific definition of multi-generator quasi-cyclic code +is given below. +Definition 3. Let gi(x), fi, j(x) are monic polynomials in R, where gi(x) | (xn−1), and 1 ≤ i ≤ h, 0 ≤ j ≤ ℓ−1. If C is a +quasi-cyclic code with h-generators: ([g1(x)f1,0(x)], [g1(x)f1,1(x)], · · · , [g1(x)f1,ℓ−1(x)]), ([g2(x)f2,0(x)], [g2(x)f2,1(x)], +· · ·, [g2(x)f2,ℓ−1(x)]), . . ., ([gh(x)fh,0(x)], [gh(x)fh,1(x)], · · · ,[gh(x)fh,ℓ−1(x)]), then C is called h-generator quasi-cyclic +code with index ℓ, whose generator matrix G has the following form: +G = + +G1,0 +G1,1 +· · · +G1,ℓ−1 +G2,0 +G2,1 +· · · +G2,ℓ−1 +... +... +... +... +Gh,0 +Gh,1 +· · · +Gh,ℓ−1 + +, +(13) +where Gi, j are n × n circulant matrices generated by [gi(x)fi, j(x)], respectively. +5 + +It follows from Definition 3 that a multi-generator quasi-cyclic code can be considered a code generated by jux- +taposing multiple 1-generator quasi-cyclic codes up and down. Therefore, before determining the overall complete +non-orthogonalityof multi-generator quasi-cyclic code, the relationship between the different 1-generator quasi-cyclic +codes as constituents needs to be determined. +Theorem 9. Let gi(x), fi, j(x) are monic polynomials in R, and gi(x) | (xn − 1), 1 ≤ i ≤ 2, 0 ≤ j ≤ ℓ − 1. C1 +and C2 are 1-generator quasi-cyclic codes with generators ([g1(x)f1,0(x)], [g1(x)f1,1(x)], · · · , [g1(x)f1,ℓ−1(x)]) and +([g2(x)f2,0(x)], [g2(x)f2,1(x)], · · · , [g2(x)f2,ℓ−1(x)]), respectively. Then, C1 and C2 complete Euclidean non-othogaonal +with each other is equivalent with +g1(x) = g2(x) = ˜g1(x), +gcd( +ℓ−1 +� +j=0 +f1, j(x) ¯f2, j(x), xn−1 +g1(x)) = 1. +(14) +where Gi, j are n × n circulant matrices generated by [gi(x)fi, j(x)], respectively. +Proof. Assume that a(x) and b(x) are arbitrary polynomials in R, then any codewords in C1 and C2 can be represented +as c1 = ([a(x)g1(x)f1,0(x)], [a(x)g1(x)f1,1(x)], · · · , [a(x)g1(x)f1,ℓ−1(x)]) and c2 = ([b(x)g2(x)f2,0(x)], [b(x)g2(x)f2,1(x)], +· · · , [b(x)g2(x)f2,ℓ−1(x)]), respectively. The Euclidean inner product of c1 and c2 can be expressed as +⟨c1, c2⟩e = +ℓ−1 +� +j=0 +⟨[a(x)g1(x)f1, j(x)], [b(x)g2(x)f2, j(x)]⟩e += +ℓ−1 +� +j=0 +⟨[a(x)g1(x)f1, j(x) ¯f2, j(x)], [b(x)g2(x)]⟩e += +⟨[a(x)g1(x) +ℓ−1 +� +j=0 +f1, j(x) ¯f2, j(x)], [b(x)g2(x)]⟩e. +From Lemma 3, it is clear that C1 and C2 completely Euclidean non-othogaonal with each other is equivalent with +g1(x) = g2(x) = ˜g1(x) and gcd( +ℓ−1 +� +j=0 +f1, j(x) ¯f2, j(x), xn−1 +g1(x)) = 1 hold, so this theorem is proved. +There is a similar result under Hermitian inner product, so we give the following theorem without proof. +Theorem 10. 
Let gi(x), fi, j(x) are monic polynomials in R, and gi(x) | (xn − 1), 1 ≤ i ≤ 2, 0 ≤ j ≤ ℓ − 1. C1 +and C2 are 1-generator quasi-cyclic codes with generators �[g1(x)f1,0(x)], [g1(x)f1,1(x)], · · · , [g1(x)f1,ℓ−1(x)]� and +�[g2(x)f2,0(x)], [g2(x)f2,1(x)], · · · , [g2(x)f2,ℓ−1(x)]�, respectively. Then, C1 and C2 complete Hermitian non-othogaonal +with each other is equivalent with +g1(x) = g2(x) = ˜gq +1(x), +gcd( +ℓ−1 +� +j=0 +f1, j(x) ¯f q +2, j(x), xn−1 +g1(x)) = 1. +(15) +where Gi, j are n × n circulant matrices generated by [gi(x)fi, j(x)], respectively. +Theorem 11. Let ℓ be even, m = ℓ/2, gi(x), fi, j(x) are monic polynomials in R, and gi(x) | (xn −1), 1 ≤ i ≤ 2, 0 ≤ j ≤ +ℓ−1. C1 and C2 are 1-generator quasi-cyclic codes with generators ([g1(x)f1,0(x)], [g1(x)f1,1(x)], · · · , [g1(x)f1,ℓ−1(x)]) +and ([g2(x)f2,0(x)], [g2(x)f2,1(x)], · · · , [g2(x)f2,ℓ−1(x)]), respectively. Then, C1 and C2 complete symplectic non-orthogonal +with each other is equivalent with +g1(x) = g2(x) = ˜g1(x), +gcd( +m−1 +� +j=0 +(f1, j(x) ¯f2,m+ j(x) − f1,m+ j(x) ¯f2, j(x)), xn−1 +g1(x)) = 1. +(16) +where Gi, j are n × n circulant matrices generated by [gi(x)fi, j(x)], respectively. +6 + +Proof. Suppose a(x) and b(x) are any polynomials in R, then any two codewords in C can be represented as +c1 = ([a(x)g1(x)f1,0(x)], [a(x)g1(x)f1,1(x)], · · · , [a(x)g1(x)f1,ℓ−1(x)]) and c2 = ([b(x)g2(x)f2,0(x)], [b(x)g2(x)f2,1(x)], +· · · , [b(x)g2(x)f2,ℓ−1(x)]), respectively. The symplectic inner product of c1 and c2 can be expressed as: +⟨c1, c2⟩s = +c1 · +� +0 +Imn +−Imn +0 +� +· cT +2 += +m−1 +� +j=0 +⟨[a(x)g1(x)f1, j(x)], [b(x)g(x)f2,m+ j(x)]⟩e +− +m−1 +� +j=0 +⟨[a(x)g1(x)f1,m+ j(x)], [b(x)g2(x)f2, j(x)]⟩e += +m−1 +� +j=0 +⟨[a(x)g1(x)f1, j(x) ¯f2,m+ j(x)], [b(x)g2(x)]⟩e +− +m−1 +� +j=0 +⟨[a(x)g1(x)f1,m+ j(x) ¯f2, j(x)], [b(x)g2(x)]⟩e += +⟨[a(x)g1(x) +m−1 +� +j=0 +(f1, j(x) ¯f2,m+ j(x) − f1,m+ j(x) ¯f2, j(x))], [b(x)g2(x)]⟩e. +From Lemma 1 and 3, it is clear that the sufficient and necessary conditions for C1 and C2 complete symplectic +non-othogaonal with each other is is that g1(x) = g2(x) = ˜g1(x) and gcd( +m−1 +� +j=0 +(f1, j(x) ¯f2,m+ j(x) − f1,m+ j(x) ¯f2, j(x)), xn−1 +g1(x)) = +1 hold, so this theorem is proved. +Theorem 12. Let C be an h-generator quasi-cyclic code in Definition 3 and Ci are 1-generator quasi-cyclic codes +generated by ([gi(x)fi,0(x)], [gi(x)fi,1(x)], · · · , [gi(x)fi,ℓ−1(x)]), i ∈ {1, 2, · · · , h}. Then C is LCD code if and only if +∀r, s ∈ {1, 2, · · · , h}, there exist Cr ∩ C ⊥∗ +s += {0}. +Proof. With Definition 3, C = C1 + C2 + · · · + Ch. C is LCD if and only if C ∩ C ⊥∗ = {0}, i.e., ∀c1 ∈ C , there exist +c2 ∈ C , ⟨c1, c2⟩∗ � 0. Therefore, ∀r, s ∈ {1, 2, · · · , h}, there is Cr ∩ C ⊥∗ +s += {0}. +In addition, C ⊥∗ = C ⊥∗ +1 +∩ C ⊥∗ +2 +∩ · · · ∩ C ⊥∗ +h . For the reason that ∀r, s ∈ {1, 2, · · · , h}, there is Cr ∩ C ⊥∗ +s += {0}, so +C ∩ C ⊥∗ = {0}. +It is evident that by combining Theorems 6-12, we have the following theorems. +Theorem 13. Let C be h-generator quasi-cyclic code with index ℓ over Fq in Definition 3, then the sufficient and +necessary conditions for C to be Euclidean LCD code is +g1(x) = g2(x) = · · · = gh(x) = ˜g1(x), +∀r, s ∈ {1, 2, · · · , h}, gcd( +ℓ−1 +� +j=0 +fr, j(x) ¯fs, j(x), xn−1 +g1(x)) = 1. +(17) +Theorem 14. 
Let C be h-generator quasi-cyclic code with index ℓ over Fq2 in Definition 3, then the sufficient and +necessary conditions for C to be Hermitian LCD code is +g1(x) = g2(x) = · · · = ˜gq +1(x), +∀r, s ∈ {1, 2, · · · , h}, gcd( +ℓ−1 +� +j=0 +fr, j(x) ¯f q +s, j(x), xn−1 +g1(x)) = 1. +(18) +Theorem 15. Let ℓ be even, m = ℓ/2, C be h-generator quasi-cyclic code with index ℓ over Fq in Definition 3, then +the sufficient and necessary conditions for C to be symplectic LCD code are +g1(x) = g2(x) = · · · = ˜g1(x), +∀r, s ∈ {1, 2, · · · , h}, gcd( +m−1 +� +i=0 +(fs,i(x) ¯fr,m+i(x) − fs,m+i(x) ¯fr,i(x)), xn−1 +g1(x)) = 1. +(19) +7 + +5. Some examples of good complementary dual quasi-cyclic codes +In this section, we provide some examples from our construction of good LCD code to help the reader better +understand the approach of this paper. +Specifically, Examples 1, 2, and 3 explain the construction methods of the Euclidean, Hermitian, and symplectic +LCD codes in Section 4, respectively. +Example 1. Set q = 2, n = 13. Let g(x) = 1, f0(x) = 1, f1(x) = x12+x7+x3+x+1, and f2(x) = x12+x11+x9+x8+x5+ +x3 + x2. One can easy to check that g(x) = ˜g(x), gcd(�2 +i=0 fi(x) ¯fi(x), xn−1 +g(x) ) = 1, so ([g(x)f0(x)], [g(x)f1(x)], [g(x)f2(x)]) +can generate a 1-generator quasi-cyclic Euclidean LCD code. Then, using Magma [25] we can compute this code +have parameters [39, 13, 12]2, whose weight distribution is w(z) = 1 + 39z12 + 208z13 + 286z14 + 325z15 + 546z16 + +· · · + z39. Observe that a code with parameters [39, 13, 11]2 is the best-known binary Euclidean LCD with length 39 +and dimension 13 in [15]. Therefore, the corresponding minimum distance record can be improved to 12. +Example 2. Set q2 = 4, n = 19. Let w be a primitive element of F4. Let g(x) = x + 1, and f0(x) = w2x18 + +x17 + w2x16 + x15 + x14 + w2x13 + wx12 + wx11 + w2x10 + x9 + x8 + w2x7 + wx6 + x5 + wx4 + x3 + x2 + wx + 1, +f1(x) = x15 + w2x14 + wx12 + w2x10 + wx7 + x6 + wx5 + wx4 + wx3 + wx2 + w2x + 1. One can easy to check that +g(x) = ˜gq(x), gcd(f0(x) ¯f q +0 (x) + f1(x) ¯f q +1 (x), xn−1 +g(x) ) = 1, so ([g(x)f0(x)], [g(x)f1(x)]) can generate a 1-generator quasi- +cyclic Hermitian LCD code. Then, using Magma [25] we can compute this code have parameters [38, 18, 12]4, and it’s +dual is [38, 20, 11]4, their weight distributions are w(z) = 1 + 912z12 + 7296z13 + 44859z14 + 199842z15 + 886977z16 + +· · · + 1229205z38 and w(z⊥) = 1 + 2736z11 + 21888z12 + 127224z13 + 684171z14 + 3211722z15 + · · · + 19686954z38, +respectively. It is worth stating that both [38, 18, 12]4 and [38, 20, 11]4 reach the minimum distance lower bounds in +code table [30], so they are both best codes. +Example 3. Set q = 2, n = 21. Let g(x) = x3 + 1, f0(x) = x18 + x16 + x15 + x14 + x13 + x12 + x8 + x7 + x3 + 1, +f1(x) = x20 + x19 + x18 + x15 + x14 + x9 + x7 + x5 + x3 + x2 + 1. One can easy to check that g(x) = ˜g(x), gcd(f0(x) ¯f1(x) − +f1(x) ¯f0(x), xn−1 +g(x) ) = 1, so ([g(x)f0(x)], [g(x)f1(x)]) can generate a 1-generator quasi-cyclic symplectic LCD code. Then, +using Magma [25] we can calculate this code have parameters [42, 18, 9]s +2, whose symplectic weight distribution is +w(z) = 1 + 448z9 + 1344z10 + 3906z11 + 9051z12 + 18753z13 + · · · + 609z21. Therefore, there exists a trace Hermitian +ACD code with parameters (21, 9, 9)4. It should be noted that the best known Hermitian LCD code in [14] with length +21 and dimension 9 have parameters [21, 9, 8]4, so our symplectic construction has better performance. +6. 
6. Conclusion

In this work, we propose an equivalent definition of LCD codes, which allows us to determine the complementary duality of linear codes at the codeword level. Furthermore, building on this result, we determine the necessary and sufficient conditions for quasi-cyclic codes to be LCD codes with respect to the Euclidean, Hermitian, and symplectic inner products. Finally, we give some specific examples of the construction of Euclidean, Hermitian, and symplectic quasi-cyclic LCD codes to show that quasi-cyclic codes can be utilized to construct good LCD codes.

However, by Theorems 13, 14 and 15, the reader can see that when a quasi-cyclic code has multiple generators, the different generators must share the same self-reciprocal polynomial, which significantly limits the performance of multi-generator quasi-cyclic LCD codes. Therefore, research on quasi-cyclic LCD codes should mainly focus on 1-generator quasi-cyclic codes, which helps to construct quasi-cyclic LCD codes of low dimension. For LCD codes with dimension close to $\ell n/2$, 1-generator quasi-cyclic codes with a small index, or other mathematical tools, are required. We hope this will attract scholars' interest in research related to quasi-cyclic codes and advance this area together.

7. Acknowledgments

This work is supported by the National Natural Science Foundation of China under Grants No. U21A20428, 61972413, 61901525 and 62002385, and by the Natural Science Foundation of Shaanxi under Grants No. 2021JM-216 and 2021JQ-335.

References

[1] J. L. Massey, Linear codes with complementary duals, Discrete Mathematics 106 (1992) 337–342.
[2] C. Carlet, S. Guilley, Complementary dual codes for counter-measures to side-channel attacks, Advances in Mathematics of Communications 10 (1) (2016) 131.
[3] N. Sendrier, Linear codes with complementary duals meet the Gilbert–Varshamov bound, Discrete Mathematics 285 (1-3) (2004) 345–347.
[4] C. Li, C. Ding, S. Li, LCD cyclic codes over finite fields, IEEE Transactions on Information Theory 63 (7) (2017) 4344–4356.
[5] S. T. Dougherty, J.-L. Kim, B. Özkaya, L. Sok, P. Solé, The combinatorics of LCD codes: linear programming bound and orthogonal matrices, International Journal of Information and Coding Theory 4 (2-3) (2017) 116–128.
[6] L. Galvez, J. L. Kim, N. Lee, Y. G. Roe, B.-S. Won, Some bounds on binary LCD codes, Cryptography and Communications 10 (4) (2018) 719–728.
[7] C. Carlet, S. Mesnager, C. Tang, Y. Qi, Euclidean and Hermitian LCD MDS codes, Designs, Codes and Cryptography 86 (11) (2018) 2605–2618.
[8] C. Li, Hermitian LCD codes from cyclic codes, Designs, Codes and Cryptography 86 (10) (2018) 2261–2278.
[9] M. Araya, M. Harada, K. Saito, Quaternary Hermitian linear complementary dual codes, IEEE Transactions on Information Theory 66 (5) (2019) 2751–2759.
[10] C. Carlet, S. Mesnager, C. Tang, Y. Qi, R. Pellikaan, Linear codes over Fq are equivalent to LCD codes for q > 3, IEEE Transactions on Information Theory 64 (4) (2018) 3010–3017.
[11] S. Bouyuklieva, Optimal binary LCD codes, Designs, Codes and Cryptography 89 (11) (2021) 2445–2461.
[12] M. Harada, Construction of binary LCD codes, ternary LCD codes and quaternary Hermitian LCD codes, Designs, Codes and Cryptography 89 (10) (2021) 2295–2312.
[13] K. Ishizuka, K. Saito, Construction for both self-dual codes and LCD codes, arXiv preprint arXiv:2108.12544.
[14] K. Ishizuka, Construction of quaternary Hermitian LCD codes, Cryptography and Communications (2022) 1–13.
[15] S. Li, M. Shi, Improved lower and upper bounds for LCD codes, arXiv preprint arXiv:2206.04936.
[16] X. Liu, H. Liu, L. Yu, New binary and ternary LCD codes from matrix-product codes, Linear and Multilinear Algebra 70 (5) (2022) 809–823.
[17] M. Shi, N. Liu, F. Özbudak, P. Solé, Additive cyclic complementary dual codes over F4, Finite Fields and Their Applications 83 (2022) 102087.
[18] A. R. Calderbank, E. M. Rains, P. Shor, N. J. Sloane, Quantum error correction via codes over GF(4), IEEE Transactions on Information Theory 44 (4) (1998) 1369–1387.
[19] H. Xu, W. Du, Constructions of symplectic LCD MDS codes, Bulletin of the Malaysian Mathematical Sciences Society 44 (5) (2021) 3377–3390.
[20] X. Huang, J. Li, S. Huang, Constructions of symplectic LCD MDS codes from quasi-cyclic codes, Advances in Mathematics of Communications 16 (4) (2022) 779–790.
[21] X.-H. Yang, J. L. Massey, The condition for a cyclic code to have a complementary dual, Discrete Mathematics 126 (1994) 391–393.
[22] M. Esmaeili, S. Yari, On complementary-dual quasi-cyclic codes, Finite Fields and Their Applications 15 (2009) 375–386.
[23] C. Güneri, B. Özkaya, P. Solé, Quasi-cyclic complementary dual codes, Finite Fields and Their Applications 42 (2016) 67–80.
[24] A. Saleh, M. Esmaeili, On complementary dual quasi-twisted codes, Journal of Applied Mathematics and Computing 56 (1) (2018) 115–129.
[25] W. Bosma, J. Cannon, C. Playoust, The Magma algebra system I: The user language, Journal of Symbolic Computation 24 (3-4) (1997) 235–265.
[26] W. C. Huffman, V. Pless, Fundamentals of Error-Correcting Codes, Cambridge University Press, U.K., 2003.
[27] W. C. Huffman, J. L. Kim, P. Solé, Concise Encyclopedia of Coding Theory, Chapman and Hall/CRC, 2021.
[28] C. Galindo, F. Hernando, R. Matsumoto, Quasi-cyclic constructions of quantum codes, Finite Fields and Their Applications 52 (2018) 261–280.
[29] J. Lv, R. Li, J. Wang, Quantum codes derived from one-generator quasi-cyclic codes with Hermitian inner product, International Journal of Theoretical Physics 59 (1) (2020) 300–312.
[30] M. Grassl, Bounds on the minimum distance of linear codes and quantum codes, online available at http://www.codetables.de, accessed on 2022-12-30.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' cHenan Key Laboratory of Network Cryptography Technology, Zhengzhou, 450001, China.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Abstract Linear complementary dual codes (LCD) are codes that intersect trivially with its dual.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' LCD codes have recently become a popular topic due to their applications in data storage, communication systems, and cryptography.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In this paper, we propose a new equivalence definition for LCD codes, which allows us to judge the complementary duality of linear codes from the codeword level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Further, we determine the necessary and sufficient conditions for quasi-cyclic codes to be LCD codes involving Euclidean, Hermitian, and symplectic inner products.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Finally, we give several examples demonstrating that quasi-cyclic codes can be utilized to construct good Euclidean, Hermitian, and symplectic LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Keywords: quasi-cyclic codes, complementary dual codes, necessary and sufficient conditions, Euclidean, Hermitian, symplectic 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Introduction Linear complementary dual codes (LCD) intersect their dual codes trivially.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' LCD codes have been used exten- sively in data storage, communication systems, consumer electronics, and cryptography.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In [1], Massey showed that LCD codes provide an optimal linear coding scheme for a two-user binary adder channel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In [2], Carlet et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' studied the application of binary LCD codes in countering side channel attacks and fault injection attacks and proposed sev- eral LCD code constructions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Moreover, it was shown in [3] that LCD codes satisfy the Gilbert-Varshamov bound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Due to the critical application of LCD codes, much research has been conducted on LCD codes [4, 5, 6, 7, 8, 9].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Notably, Carlet et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [10] showed that any code over Fq is equivalent to some Euclidean LCD code for q ≥ 4, and any code over Fq2 is equivalent to some Hermitian LCD code for q ≥ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Therefore, most research on LCD codes is currently focused on small fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Bouyuklieva [11], Harada [12], Ishizuka et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [13, 14], Li et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [15], and Liu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [16] constructed many good binary, ternary Euclidean LCD codes and quaternary Hermitian LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' They have established several LCD code tables with short lengths.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In addition, Shi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [17] introduced additive complementary dual codes (ACD) for security applications that still makes sense.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' With [18], symplectic inner product and trace Hermitian inner product are equivalent, so a F2n q -symplectic LCD code is equivalent to a Fn q2-trace Herimtian LCD code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Therefore, the construction of the symplectic LCD code is also of significant importance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Xu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [19] constructed a class of symplectic LCD MDS codes by employing Vandermonde matrices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In [20], Huang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' construct some good low-dimensional symplectic LCD codes over F2r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In [21], Yang and Massey provided a necessary and sufficient condition under which a cyclic code has a Eu- clidean complementary dual.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Esmaeili et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [22] studied a sufficient condition for h-generated quasi-cyclic codes to be Euclidean LCD codes and gave a method for constructing quasi-cyclic Euclidean LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In [23], G¨uneri et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' characterized and studied quasi-cyclic codes with Euclidean and Hermitian complementary duals employing their concatenation structure and presented a method for constructing quasi-cyclic Euclidean and Hermitian LCD codes ∗Corresponding author Email addresses: gcf2020yeah.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='net (Chaofeng Guan), liruihu@aliyun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='com (Ruihu Li), ma_zhi@163.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='com (Zhi Ma) Preprint submitted to Elsevier January 4, 2023 from codes over larger alphabets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Later on, Saleh et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [24] studied quasi-twisted codes of specific lengths and pro- posed some sufficient conditions for h-generator quasi-twisted codes to be Euclidean LCD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' However, many important issues remain regarding developing LCD codes from quasi-cyclic codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' One of the most critical issues is determining the sufficient and necessary conditions for quasi-cyclic codes to be LCD codes so that we can construct quasi-cyclic LCD codes more efficiently.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The main goal of this paper is to investigate quasi-cyclic Euclidean, Hermitian, and symplectic LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' We first ascertain the sufficient and necessary conditions for quasi-cyclic codes to be Euclidean, Hermitian, and symplectic LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' More precisely, we answer the following two questions: 1, What polynomials can be applied to construct quasi-cyclic LCD codes?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 2, How to use polynomials to construct quasi-cyclic LCD codes?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Firstly, we give an equivalent definition of an LCD code that allows the treatment of LCD codes at the codeword level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Then, by decomposing the codeword space of quasi-cyclic code, we obtain the sufficient and necessary condi- tions for quasi-cyclic codes to intersect their dual trivially under Euclidean, Hermitian, and symplectic inner products.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Finally, we present a practical method for constructing LCD codes using quasi-cyclic codes and give some examples of good quasi-cyclic Euclidean, Hermitian, and symplectic LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The paper is organized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Section 2 gives preliminaries and background on quasi-cyclic codes, Euclidean, Hermitian, and symplectic LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In Section 3 and 4, we redescribe LCD codes in terms of codewords and iden- tify sufficient and necessary conditions for the quasi-cyclic codes to be LCD codes under Euclidean, Hermitian, and symplectic inner products, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In Section 5, we give several examples to illustrate the method of constructing quasi-cyclic Euclidean, Hermitian, and symplectic LCD codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Finally, we give concluding remarks in Section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' All calculations in this paper are done with the algebraic computer system Magma [25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Preliminaries In this section, we introduce some basic concepts of quasi-cyclic codes, Euclidean, Hermitian, and symplectic LCD codes to facilitate the unfolding of subsequent sections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' For more details, we refer the reader to the standard handbook [26, 27].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Basics of linear codes Throughout this paper, p is a prime, and Fq is the finite field of order q, where q = pr for some positive integer r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' A [ℓn, k]q linear code C over Fq is a linear subspace of Fℓn q with dimension k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let ⃗u = (u0, u1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', uℓn−1) ∈ C , then Hamming weight of ⃗u is wH(⃗u) = #{i | ui � 0, 0 ≤ i ≤ ℓn − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If minimum Hamming distance of C is dH = min{wH(⃗u) | ⃗u ∈ C \\ {0}}, then C can be written as [ℓn, k, dH]q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If ℓ is even, let N = ℓn/2, then symplectic weight of ⃗u is ws(⃗u) = #{i | (ui, uN+i) � (0, 0), 0 ≤ i ≤ N − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Analogously, if minimum symplectic weight of C is ds(C ) = min{ws(⃗u) | ⃗u ∈ C \\ {0}}, then we denote C as [ℓn, k, ds]s q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The Euclidean inner product of ⃗x = (x0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' , xℓn−1), ⃗y = (y0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' , yℓn−1) ∈ Fℓn q is defined as: ⟨⃗x,⃗y⟩e = ℓn � i=1 xiyi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (1) Similarly, the Hermitian inner product of ⃗x,⃗y ∈ Fℓn q2 is defined as: ⟨⃗x,⃗y⟩h = ℓn � i=1 xiyq i .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (2) If ℓ is even, then symplectic inner product of ⃗x,⃗y ∈ Fℓn q is: ⟨⃗x,⃗y⟩s = N−1 � i=0 (xiyN+i − xN+iyi) .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (3) The Euclidean, Hermitian and symplectic dual codes of C can be separately expressed as C ⊥e = {⃗v ∈ Fℓn q | ⟨⃗u,⃗v⟩e = 0, ∀⃗u ∈ C }, C ⊥q = {⃗v ∈ Fℓn q2 | ⟨⃗u,⃗v⟩h = 0, ∀⃗u ∈ C } and C ⊥s = {⃗v ∈ Fℓn q | ⟨⃗u,⃗v⟩s = 0, ∀⃗u ∈ C }.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C ∩ C ⊥∗ = {0}, then C is an LCD code, where “⊥∗” represents one of Euclidean, Hermitian and symplectic dual.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 2 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Basics of quasi-cyclic codes Cyclic codes are a particular type of linear codes that are closed under the right cyclic shift operator τ1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' For ⃗x = (x0, x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', xn−1) ∈ Fn q, we denote τ1(⃗x) = (xn−1, x0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' , xn−2) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C = τ1(C ) then C is called a cyclic code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let R = Fq[x]/ ⟨xn − 1⟩, and define a mapping ϕ1 as follows, ϕ1 : Fn q → R (c0, c1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', cn−1) �→ c0 + c1x + · · · + cn−1xn−1 (4) Clearly, ϕ1 is an isomorphism of Fq-modules and a cyclic code C of length n is an ideal of the quotient ring R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Furthermore, a cyclic code C can be generated by a monic divisor g(x) of xn − 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The polynomial g(x) is called the generator polynomial of C , and the dimension of C is n−deg(g(x)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let h(x) = xn −1/g(x) and ˜h(x) = xdeg(h(x))h(x−1), then Euclidean dual code of C is cyclic code with generator polynomial g⊥e(x) = ˜h(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let gq(x) = gq 0+gq 1x+gq 2x+· · ·+ gq n−1xn−1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C is a cyclic codes over Fq2, then Hermitian dual code of C is cyclic code generated by g⊥q(x) = ˜hq(x−1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let ⃗x = (x0, x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' , xℓn−1) ∈ Fℓn q , and τ2(⃗x) = (xn−1, x0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' , xn−2, x2n−1, xn, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', x2n−2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' , xnℓ−1, x(n−1)ℓ, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' , xnℓ−2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' A linear space C ⊂ Fℓn q said to be a quasi-cyclic code of index ℓ if C = τ2(C ).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Define an Fq -module isomorphism ϕ2 from Fℓn q to Rℓ, ϕ2 : Fℓn q → Rℓ = R ⊕ R ⊕ · · · ⊕ R �c0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', cn−1, cn, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', c2n−1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', cℓ(n−1), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', cℓn−2 � �→ (c0(x), c1(x), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', cℓ−1(x)) (5) where ci(x) = �n−1 t=0 ct,ixt, i = 0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', ℓ − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Algebraically, a quasi-cyclic code C is an R-submodule of Rℓ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' A generator matrix of h-generator quasi-cyclic code with index ℓ has the following form: M = \uf8eb\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ed A1,0 A1,1 · · A1,ℓ−1 A2,0 A2,1 · · A2,ℓ−1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Ah,0 Ah,1 · · Ah,ℓ−1 \uf8f6\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f8 , (6) where Ai, j are circulant matrices defined by some polynomials ai, j(x) ∈ R, where 1 ≤ i ≤ h and 0 ≤ j ≤ ℓ − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' New characterization of complementary dual codes This section will give a new characterization of LCD codes in terms of codewords, laying the foundation for further proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' First, we make a convention for the representation of inner products, where “l” denotes one of Euclidean and Hermitian inner products, and “∗” denotes one of Euclidean, Hermitian, and symplectic products.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Lemma 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let C be a linear code over Fq, then C is an LCD code under the inner product “∗” if and only if ∀c1 ∈ C \\ {0}, ∃c2 ∈ C , ⟨c1, c2⟩∗ � 0 holds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' It is obvious that C ∩ C ⊥∗ = {0} is equivalent with ∀c1 ∈ C \\ {0}, c1 � C ⊥∗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Moreover, c1 � C ⊥∗ is equivalent with ∃c2 ∈ C , ⟨c1, c2⟩∗ � 0, so C is LCD equivalent with ∀c1 ∈ C \\ {0}, ∃c2 ∈ C , ⟨c1, c2⟩∗ � 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' For ease of presentation, we give the following definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Definition 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let C1 and C2 are linear codes over Fq, if the following conditions hold: ∀c1 ∈ C1 \\ {0}, ∃c2 ∈ C2, ⟨c1, c2⟩∗ � 0, ∀c2 ∈ C2 \\ {0}, ∃c1 ∈ C1, ⟨c1, c2⟩∗ � 0, (7) then we call C1 and C2 completely non-orthogonal to each other under inner product “∗”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Lemma 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let C1 and C2 are linear codes over Fq, then C1 and C2 completely non-orthogonal to each other holds if and noly if C1 ∩ C ⊥∗ 2 = {0} and C ⊥∗ 1 ∩ C2 = {0}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 3 Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' This lemma holds from the definition of dual codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let C1 and C2 are two cyclic codes , and separately generated by g1(x) and g2(x), where g1(x) | xn −1 and g2(x) | xn − 1, then C1 and C2 completely Euclidean non-orthogonal to each other is equivalent with g1(x) = ˜g1(x) = g2(x), and C1 and C2 completely Hermitian non-orthogonal to each other is equivalent with g1(x) = ˜gq 1(x) = g2(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' With [26], C1 ∩C ⊥l 2 and C ⊥l 1 ∩C2 are both cyclic codes generated by lcm(g1(x), g⊥l 2 (x)) and lcm(g⊥l 1 (x), g2(x)), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Further, C1∩C ⊥l 2 = {0} and C ⊥l 1 ∩C2 = {0} yield lcm(g1(x), g⊥l 2 (x)) ≡ 0 (mod xn−1) and lcm(g⊥l 1 (x), g2(x)) ≡ 0 (mod xn − 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Thus, there are two cases, the first is ˜g2(x) | g1(x) and ˜g1(x) | g2(x) ⇐⇒ g1(x) = ˜g1(x) = g2(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The second is ˜gq 2(x) | g1(x) and ˜gq 1(x) | g2(x) ⇐⇒ g1(x) = ˜gq 1(x) = g2(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Hence, we complete the proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Quasi-cyclic complementary dual codes In this section, we determine the sufficient and necessary conditions for quasi-cyclic codes to be LCD codes under Euclidean, Hermitian, and symplectic inner products, starting from Lemma 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Some of the symbols used in this paper are described below for ease of expression.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let g(x) = g0 + g1x + g2x + · · + gn−1xn−1 ∈ R, [g(x)] denote vector defined by coefficients of g(x) in Fn q, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' [g(x)] = [g0, g1, g2, · · · , gn−1], and ¯g(x) = xng(x−1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' In order to determine the Euclidean inner product between different polynomials in coefficient vector form, the following two lemmas are crucial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Lemma 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' ([28]) Let f(x), g(x) and h(x) be polynomials in R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Then the following equation holds for the Euclidean inner product among them: ⟨[f(x)g(x)], [h(x)]⟩e = ⟨[g(x)], [ ¯f(x)h(x)]⟩e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (8) Lemma 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' ([29]) Let f(x), g(x) and h(x) be monic polynomials in R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Then the following equality of Hermitian inner product of vectors in Fn q2 holds: ⟨[f(x)g(x)], [h(x)]⟩h = ⟨[g(x)], [ ¯f q(x)h(x)]⟩h.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' One-generator complementary dual quasi-cyclic codes Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let g(x) and f j(x) are monic polynomials in R, and g(x) | (xn − 1), 0 ≤ j ≤ ℓ − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C is a quasi-cyclic code generated by ([g(x)f0(x)], [g(x)f1(x)], · · · , [g(x)fℓ−1(x)]), then C is called 1-generator quasi-cyclic code with index ℓ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' A genrartor matrix G of C have the following form: G = (G0,G1, · · · ,Gℓ−1) , (9) where G j are n × n circulant matrices generated by [g(x)f j(x)], respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Theorem 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C is a 1-generator quasi-cyclic code in Definition 2, then the sufficient and necessary conditions for C to be Euclidean LCD code are g(x) = ˜g(x), gcd( ℓ−1 � i=0 fi(x) ¯fi(x), xn−1 g(x) ) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (10) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Suppose a(x),b(x) are any polynomials in R, then any two codewords in C can be represented as c1 = ([a(x)g(x)f0(x)], [a(x)g(x)f1(x)], · · · , [a(x)g(x)fℓ−1(x)]) and c2 = ([b(x)g(x)f0(x)], [b(x)g(x)f1(x)], · · · , [b(x)g(x)fℓ−1(x)]), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The Euclidean inner product of c1 and c2 can be expressed as: ⟨c1, c2⟩e = ℓ−1 � i=0 ⟨[a(x)g(x)fi(x)], [b(x)g(x)fi(x)]⟩e = ℓ−1 � i=0 ⟨[a(x)g(x)fi(x) ¯fi(x)], [b(x)g(x)]⟩e = ⟨[a(x)g(x) ℓ−1 � i=0 fi(x) ¯fi(x)], [b(x)g(x)]⟩e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' From Lemma 1 and 3, it is clear that the sufficient and necessary conditions for C to be Euclidean LCD code is that g(x) = ˜g(x) and gcd( ℓ−1 � i=0 fi(x) ¯fi(x), xn−1 g(x) ) = 1, so this theorem is proved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 4 Since the Hermitian inner product and the Euclidean inner product have a similar form, an analogous approach yields sufficient and necessary conditions for the 1-generator quasi-cyclic code to be a Hermitian LCD code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' There- fore, we give the following theorem without proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Theorem 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C is a 1-generator quasi-cyclic code in Definition 2, then the sufficient and necessary condition for C to be Hermitian LCD code are g(x) = ˜gq(x), gcd( ℓ−1 � i=0 fi(x) ¯f q i (x), xn−1 g(x) ) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (11) Theorem 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C is a 1-generator quasi-cyclic code in Definition 2, and ℓ is even.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let m = ℓ/2, then C is symplectic LCD code if and only if the following equations hold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' g(x) = ˜g(x), gcd( m−1 � j=0 (f j(x) ¯fm+ j(x) − fm+ j(x) ¯f j(x)), xn−1 g(x) ) = 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (12) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Suppose a(x),b(x) are any polynomials in R, then any two codewords in C can be represented as c1 = ([a(x)g(x)f0(x)], [a(x)g(x)f1(x)], · · · , [a(x)g(x)fℓ−1(x)]) and c2 = ([b(x)g(x)f0(x)], [b(x)g(x)f1(x)], · · · , [b(x)g(x)fℓ−1(x)]), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The symplectic inner product of c1 and c2 can be expressed as: ⟨c1, c2⟩s = c1 · � 0 Imn −Imn 0 � cT 2 = m−1 � j=0 ⟨[a(x)g(x)f j(x)], [b(x)g(x)fm+ j(x)]⟩e − m−1 � j=0 ⟨[a(x)g(x)fm+ j(x)], [b(x)g(x)f j(x)]⟩e = m−1 � j=0 ⟨[a(x)g(x)f j(x) ¯fm+ j(x)], [b(x)g(x)]⟩e − m−1 � j=0 ⟨[a(x)g(x)fm+ j(x) ¯f j(x)], [b(x)g(x)]⟩e = ⟨[a(x)g(x) m−1 � j=0 (f j(x) ¯fm+ j(x) − fm+ j(x) ¯f j(x))], [b(x)g(x)]⟩e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' From Lemma 1 and 3, it is clear that the sufficient and necessary conditions for C to be symplectic LCD code is that g(x) = ˜g(x) and gcd( m−1 � j=0 (f j(x) ¯fm+ j(x) − fm+ j(x) ¯f j(x)), xn−1 g(x) ) = 1, so this theorem is proved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Multi-generator complementary dual quasi-cyclic codes A multi-generator quasi-cyclic code is a quasi-cyclic code with more than two generators which has a richer algebraic structure than a 1-generator quasi-cyclic code.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The specific definition of multi-generator quasi-cyclic code is given below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Definition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let gi(x), fi, j(x) are monic polynomials in R, where gi(x) | (xn−1), and 1 ≤ i ≤ h, 0 ≤ j ≤ ℓ−1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' If C is a quasi-cyclic code with h-generators: ([g1(x)f1,0(x)], [g1(x)f1,1(x)], · · · , [g1(x)f1,ℓ−1(x)]), ([g2(x)f2,0(x)], [g2(x)f2,1(x)], · ·, [g2(x)f2,ℓ−1(x)]), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=', ([gh(x)fh,0(x)], [gh(x)fh,1(x)], · · · ,[gh(x)fh,ℓ−1(x)]), then C is called h-generator quasi-cyclic code with index ℓ, whose generator matrix G has the following form: G = \uf8eb\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ec\uf8ed G1,0 G1,1 · · G1,ℓ−1 G2,0 G2,1 · · G2,ℓ−1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Gh,0 Gh,1 · · Gh,ℓ−1 \uf8f6\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f7\uf8f8 , (13) where Gi, j are n × n circulant matrices generated by [gi(x)fi, j(x)], respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' 5 It follows from Definition 3 that a multi-generator quasi-cyclic code can be considered a code generated by jux- taposing multiple 1-generator quasi-cyclic codes up and down.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Therefore, before determining the overall complete non-orthogonalityof multi-generator quasi-cyclic code, the relationship between the different 1-generator quasi-cyclic codes as constituents needs to be determined.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Theorem 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let gi(x), fi, j(x) are monic polynomials in R, and gi(x) | (xn − 1), 1 ≤ i ≤ 2, 0 ≤ j ≤ ℓ − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' C1 and C2 are 1-generator quasi-cyclic codes with generators ([g1(x)f1,0(x)], [g1(x)f1,1(x)], · · · , [g1(x)f1,ℓ−1(x)]) and ([g2(x)f2,0(x)], [g2(x)f2,1(x)], · · · , [g2(x)f2,ℓ−1(x)]), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Then, C1 and C2 complete Euclidean non-othogaonal with each other is equivalent with g1(x) = g2(x) = ˜g1(x), gcd( ℓ−1 � j=0 f1, j(x) ¯f2, j(x), xn−1 g1(x)) = 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (14) where Gi, j are n × n circulant matrices generated by [gi(x)fi, j(x)], respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Assume that a(x) and b(x) are arbitrary polynomials in R, then any codewords in C1 and C2 can be represented as c1 = ([a(x)g1(x)f1,0(x)], [a(x)g1(x)f1,1(x)], · · · , [a(x)g1(x)f1,ℓ−1(x)]) and c2 = ([b(x)g2(x)f2,0(x)], [b(x)g2(x)f2,1(x)], · · , [b(x)g2(x)f2,ℓ−1(x)]), respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' The Euclidean inner product of c1 and c2 can be expressed as ⟨c1, c2⟩e = ℓ−1 � j=0 ⟨[a(x)g1(x)f1, j(x)], [b(x)g2(x)f2, j(x)]⟩e = ℓ−1 � j=0 ⟨[a(x)g1(x)f1, j(x) ¯f2, j(x)], [b(x)g2(x)]⟩e = ⟨[a(x)g1(x) ℓ−1 � j=0 f1, j(x) ¯f2, j(x)], [b(x)g2(x)]⟩e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' From Lemma 3, it is clear that C1 and C2 completely Euclidean non-othogaonal with each other is equivalent with g1(x) = g2(x) = ˜g1(x) and gcd( ℓ−1 � j=0 f1, j(x) ¯f2, j(x), xn−1 g1(x)) = 1 hold, so this theorem is proved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' There is a similar result under Hermitian inner product, so we give the following theorem without proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Theorem 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let gi(x), fi, j(x) are monic polynomials in R, and gi(x) | (xn − 1), 1 ≤ i ≤ 2, 0 ≤ j ≤ ℓ − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' C1 and C2 are 1-generator quasi-cyclic codes with generators �[g1(x)f1,0(x)], [g1(x)f1,1(x)], · · · , [g1(x)f1,ℓ−1(x)]� and �[g2(x)f2,0(x)], [g2(x)f2,1(x)], · · · , [g2(x)f2,ℓ−1(x)]�, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Then, C1 and C2 complete Hermitian non-othogaonal with each other is equivalent with g1(x) = g2(x) = ˜gq 1(x), gcd( ℓ−1 � j=0 f1, j(x) ¯f q 2, j(x), xn−1 g1(x)) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' (15) where Gi, j are n × n circulant matrices generated by [gi(x)fi, j(x)], respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Theorem 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ttAzT4oBgHgl3EQfBfrk/content/2301.00945v1.pdf'} +page_content=' Let ℓ be even, m = ℓ/2, gi(x), fi, j(x) are monic polynomials in R, and gi(x) | (xn −1), 1 ≤ i ≤ 2, 0 ≤ j ≤ ℓ−1.' 
There is a similar result under the Hermitian inner product, so we state the following theorem without proof.

Theorem 10. Let gi(x) and fi,j(x) be monic polynomials in R with gi(x) | (xn − 1), for 1 ≤ i ≤ 2 and 0 ≤ j ≤ ℓ − 1, and let C1 and C2 be 1-generator quasi-cyclic codes with generators ([g1(x)f1,0(x)], ..., [g1(x)f1,ℓ−1(x)]) and ([g2(x)f2,0(x)], ..., [g2(x)f2,ℓ−1(x)]), respectively. Then C1 and C2 are completely Hermitian non-orthogonal to each other if and only if

\[
g_1(x)=g_2(x)=\tilde{g}^{\,q}_1(x),\qquad
\gcd\Bigl(\sum_{j=0}^{\ell-1} f_{1,j}(x)\,\bar{f}^{\,q}_{2,j}(x),\ \frac{x^{n}-1}{g_1(x)}\Bigr)=1. \qquad (15)
\]

Theorem 11. Let ℓ be even, m = ℓ/2, and let gi(x), fi,j(x) be monic polynomials in R with gi(x) | (xn − 1), for 1 ≤ i ≤ 2 and 0 ≤ j ≤ ℓ − 1. Let C1 and C2 be 1-generator quasi-cyclic codes with generators ([g1(x)f1,0(x)], ..., [g1(x)f1,ℓ−1(x)]) and ([g2(x)f2,0(x)], ..., [g2(x)f2,ℓ−1(x)]), respectively. Then C1 and C2 are completely symplectic non-orthogonal to each other if and only if

\[
g_1(x)=g_2(x)=\tilde{g}_1(x),\qquad
\gcd\Bigl(\sum_{j=0}^{m-1}\bigl(f_{1,j}(x)\,\bar{f}_{2,m+j}(x)-f_{1,m+j}(x)\,\bar{f}_{2,j}(x)\bigr),\ \frac{x^{n}-1}{g_1(x)}\Bigr)=1. \qquad (16)
\]

Proof. Suppose a(x) and b(x) are arbitrary polynomials in R; then any two codewords of C1 and C2 can be represented as c1 = ([a(x)g1(x)f1,0(x)], ..., [a(x)g1(x)f1,ℓ−1(x)]) and c2 = ([b(x)g2(x)f2,0(x)], ..., [b(x)g2(x)f2,ℓ−1(x)]), respectively. The symplectic inner product of c1 and c2 can be expressed as (suppressing the polynomial arguments for brevity)

\[
\langle c_1, c_2\rangle_s
= c_1\begin{pmatrix}0 & I_{mn}\\ -I_{mn} & 0\end{pmatrix}c_2^{T}
=\sum_{j=0}^{m-1}\bigl\langle [a g_1 f_{1,j}],\,[b g_2 f_{2,m+j}]\bigr\rangle_e
-\sum_{j=0}^{m-1}\bigl\langle [a g_1 f_{1,m+j}],\,[b g_2 f_{2,j}]\bigr\rangle_e
\]
\[
=\sum_{j=0}^{m-1}\bigl\langle [a g_1 f_{1,j}\bar{f}_{2,m+j}],\,[b g_2]\bigr\rangle_e
-\sum_{j=0}^{m-1}\bigl\langle [a g_1 f_{1,m+j}\bar{f}_{2,j}],\,[b g_2]\bigr\rangle_e
=\Bigl\langle \Bigl[a g_1 \sum_{j=0}^{m-1}\bigl(f_{1,j}\bar{f}_{2,m+j}-f_{1,m+j}\bar{f}_{2,j}\bigr)\Bigr],\,[b g_2]\Bigr\rangle_e .
\]

From Lemmas 1 and 3 it is clear that the sufficient and necessary condition for C1 and C2 to be completely symplectic non-orthogonal to each other is that the two conditions in (16) hold, so the theorem is proved.

Theorem 12. Let C be an h-generator quasi-cyclic code as in Definition 3 and let Ci be the 1-generator quasi-cyclic codes generated by ([gi(x)fi,0(x)], [gi(x)fi,1(x)], ..., [gi(x)fi,ℓ−1(x)]), i ∈ {1, 2, ..., h}. Then C is an LCD code if and only if Cr ∩ Cs⊥∗ = {0} for all r, s ∈ {1, 2, ..., h}.

Proof. By Definition 3, C = C1 + C2 + ··· + Ch.
C is LCD if and only if C ∩ C⊥∗ = {0}, i.e., for every nonzero c1 ∈ C there exists c2 ∈ C with ⟨c1, c2⟩∗ ≠ 0. Therefore Cr ∩ Cs⊥∗ = {0} for all r, s ∈ {1, 2, ..., h}. In addition, C⊥∗ = C1⊥∗ ∩ C2⊥∗ ∩ ··· ∩ Ch⊥∗, so if Cr ∩ Cs⊥∗ = {0} for all r, s ∈ {1, 2, ..., h}, then C ∩ C⊥∗ = {0}.

It is evident that by combining Theorems 6–12 we obtain the following theorems.

Theorem 13. Let C be an h-generator quasi-cyclic code with index ℓ over Fq as in Definition 3. Then the sufficient and necessary conditions for C to be a Euclidean LCD code are

\[
g_1(x)=g_2(x)=\cdots=g_h(x)=\tilde{g}_1(x),\qquad
\gcd\Bigl(\sum_{j=0}^{\ell-1} f_{r,j}(x)\,\bar{f}_{s,j}(x),\ \frac{x^{n}-1}{g_1(x)}\Bigr)=1
\ \ \text{for all } r,s\in\{1,2,\ldots,h\}. \qquad (17)
\]

Theorem 14. Let C be an h-generator quasi-cyclic code with index ℓ over Fq2 as in Definition 3. Then the sufficient and necessary conditions for C to be a Hermitian LCD code are

\[
g_1(x)=g_2(x)=\cdots=g_h(x)=\tilde{g}^{\,q}_1(x),\qquad
\gcd\Bigl(\sum_{j=0}^{\ell-1} f_{r,j}(x)\,\bar{f}^{\,q}_{s,j}(x),\ \frac{x^{n}-1}{g_1(x)}\Bigr)=1
\ \ \text{for all } r,s\in\{1,2,\ldots,h\}. \qquad (18)
\]

Theorem 15. Let ℓ be even, m = ℓ/2, and let C be an h-generator quasi-cyclic code with index ℓ over Fq as in Definition 3. Then the sufficient and necessary conditions for C to be a symplectic LCD code are

\[
g_1(x)=g_2(x)=\cdots=g_h(x)=\tilde{g}_1(x),\qquad
\gcd\Bigl(\sum_{i=0}^{m-1}\bigl(f_{s,i}(x)\,\bar{f}_{r,m+i}(x)-f_{s,m+i}(x)\,\bar{f}_{r,i}(x)\bigr),\ \frac{x^{n}-1}{g_1(x)}\Bigr)=1
\ \ \text{for all } r,s\in\{1,2,\ldots,h\}. \qquad (19)
\]

5. Some examples of good complementary dual quasi-cyclic codes

In this section we provide some examples from our construction of good LCD codes, to help the reader better understand the approach of this paper.
Specifically, Examples 1, 2, and 3 illustrate the construction methods of the Euclidean, Hermitian, and symplectic LCD codes of Section 4, respectively.

Example 1. Set q = 2, n = 13. Let g(x) = 1, f0(x) = 1, f1(x) = x^12 + x^7 + x^3 + x + 1, and f2(x) = x^12 + x^11 + x^9 + x^8 + x^5 + x^3 + x^2. It is easy to check that g(x) = ˜g(x) and gcd(Σ_{i=0}^{2} fi(x)¯fi(x), (x^n − 1)/g(x)) = 1, so ([g(x)f0(x)], [g(x)f1(x)], [g(x)f2(x)]) generates a 1-generator quasi-cyclic Euclidean LCD code. Using Magma [25] we compute that this code has parameters [39, 13, 12]2, with weight distribution w(z) = 1 + 39z^12 + 208z^13 + 286z^14 + 325z^15 + 546z^16 + ··· + z^39. Observe that a code with parameters [39, 13, 11]2 is the best-known binary Euclidean LCD code with length 39 and dimension 13 in [15]. Therefore, the corresponding minimum distance record can be improved to 12.

Example 2. Set q^2 = 4, n = 19, and let w be a primitive element of F4. Let g(x) = x + 1, and
f0(x) = w^2x^18 + x^17 + w^2x^16 + x^15 + x^14 + w^2x^13 + wx^12 + wx^11 + w^2x^10 + x^9 + x^8 + w^2x^7 + wx^6 + x^5 + wx^4 + x^3 + x^2 + wx + 1,
f1(x) = x^15 + w^2x^14 + wx^12 + w^2x^10 + wx^7 + x^6 + wx^5 + wx^4 + wx^3 + wx^2 + w^2x + 1.
It is easy to check that g(x) = ˜g^q(x) and gcd(f0(x)¯f0^q(x) + f1(x)¯f1^q(x), (x^n − 1)/g(x)) = 1, so ([g(x)f0(x)], [g(x)f1(x)]) generates a 1-generator quasi-cyclic Hermitian LCD code. Using Magma [25] we compute that this code has parameters [38, 18, 12]4 and that its dual is a [38, 20, 11]4 code; their weight distributions are w(z) = 1 + 912z^12 + 7296z^13 + 44859z^14 + 199842z^15 + 886977z^16 + ··· + 1229205z^38 and w⊥(z) = 1 + 2736z^11 + 21888z^12 + 127224z^13 + 684171z^14 + 3211722z^15 + ··· + 19686954z^38, respectively. It is worth stating that both [38, 18, 12]4 and [38, 20, 11]4 reach the minimum distance lower bounds in the code tables [30], so they are both best-known codes.
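The gcd conditions quoted in these examples are mechanical to verify. Below is a minimal sketch of ours (not the Magma [25] computation used in the paper, and assuming ¯f(x) denotes f(x^{-1}) reduced mod x^n − 1) that recomputes the Euclidean condition of Example 1 over GF(2), with polynomials encoded as bitmasks (bit i = coefficient of x^i):

```python
def pmul(a, b):
    """Multiply two GF(2) polynomials given as bitmasks."""
    r = 0
    while b:
        if b & 1:
            r ^= a
        a <<= 1
        b >>= 1
    return r

def pmod(a, m):
    """Remainder of a modulo m over GF(2)."""
    dm = m.bit_length() - 1
    while a and a.bit_length() - 1 >= dm:
        a ^= m << (a.bit_length() - 1 - dm)
    return a

def pgcd(a, b):
    while b:
        a, b = b, pmod(a, b)
    return a

def conj(f, n):
    """f(x^-1) mod (x^n - 1): the coefficient at x^i moves to x^((n - i) mod n)."""
    g = 0
    for i in range(n):
        if (f >> i) & 1:
            g |= 1 << ((n - i) % n)
    return g

# Example 1: q = 2, n = 13, g(x) = 1, so the modulus is x^13 - 1 itself.
n = 13
m = (1 << n) | 1                                                   # x^13 + 1 over GF(2)
f = [
    0b1,                                                           # f0 = 1
    (1 << 12) | (1 << 7) | (1 << 3) | (1 << 1) | 1,                # f1
    (1 << 12) | (1 << 11) | (1 << 9) | (1 << 8) | (1 << 5) | (1 << 3) | (1 << 2),  # f2
]
s = 0
for fi in f:
    s ^= pmod(pmul(fi, conj(fi, n)), m)
print(pgcd(s, m) == 1)   # True iff the Euclidean LCD condition of Example 1 holds
```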
Example 3. Set q = 2, n = 21. Let g(x) = x^3 + 1, f0(x) = x^18 + x^16 + x^15 + x^14 + x^13 + x^12 + x^8 + x^7 + x^3 + 1, and f1(x) = x^20 + x^19 + x^18 + x^15 + x^14 + x^9 + x^7 + x^5 + x^3 + x^2 + 1. It is easy to check that g(x) = ˜g(x) and gcd(f0(x)¯f1(x) − f1(x)¯f0(x), (x^n − 1)/g(x)) = 1, so ([g(x)f0(x)], [g(x)f1(x)]) generates a 1-generator quasi-cyclic symplectic LCD code. Using Magma [25] we calculate that this code has parameters [42, 18, 9]^s_2, with symplectic weight distribution w(z) = 1 + 448z^9 + 1344z^10 + 3906z^11 + 9051z^12 + 18753z^13 + ··· + 609z^21. Therefore, there exists a trace Hermitian ACD code with parameters (21, 9, 9)4. It should be noted that the best known Hermitian LCD code in [14] with length 21 and dimension 9 has parameters [21, 9, 8]4, so our symplectic construction performs better.

6. Conclusion

In this work we propose an equivalent definition of LCD codes, which allows us to determine the complementary duality of linear codes at the codeword level. Building on this result, we determine the necessary and sufficient conditions for quasi-cyclic codes to be LCD codes with respect to the Euclidean, Hermitian, and symplectic inner products. Finally, we give some specific examples of the construction of Euclidean, Hermitian, and symplectic quasi-cyclic LCD codes, showing that quasi-cyclic codes can be utilized to construct good LCD codes. However, by Theorems 13, 14 and 15, the reader will notice that when the quasi-cyclic codes are multi-generator, the different generators can only pick the same self-reciprocal polynomial, which significantly limits the performance of quasi-cyclic LCD codes.
Therefore, research on quasi-cyclic LCD codes should mainly focus on 1-generator quasi-cyclic codes, which will help to construct quasi-cyclic LCD codes of low dimensions. For LCD codes with dimensions close to ℓn/2, 1-generator quasi-cyclic codes with a small index, or other mathematical tools, are required. We hope this will attract scholars' interest in research related to quasi-cyclic codes and advance this area together.

7. Acknowledgments

This work is supported by the National Natural Science Foundation of China under Grants No. U21A20428, 61972413, 61901525 and 62002385, and by the Natural Science Foundation of Shaanxi under Grants No. 2021JM-216 and 2021JQ-335.

References

[1] J. L. Massey, Linear codes with complementary duals, Discrete Mathematics 106 (1992) 337–342.
[2] C. Carlet, S. Guilley, Complementary dual codes for counter-measures to side-channel attacks, Advances in Mathematics of Communications 10 (1) (2016) 131.
[3] N. Sendrier, Linear codes with complementary duals meet the Gilbert–Varshamov bound, Discrete Mathematics 285 (1-3) (2004) 345–347.
[4] C. Li, C. Ding, S. Li, LCD cyclic codes over finite fields, IEEE Transactions on Information Theory 63 (7) (2017) 4344–4356.
[5] S. T. Dougherty, J.-L. Kim, B. Özkaya, L. Sok, P. Solé, The combinatorics of LCD codes: linear programming bound and orthogonal matrices, International Journal of Information and Coding Theory 4 (2-3) (2017) 116–128.
[6] L. Galvez, J.-L. Kim, N. Lee, Y. G. Roe, B.-S. Won, Some bounds on binary LCD codes, Cryptography and Communications 10 (4) (2018) 719–728.
[7] C. Carlet, S. Mesnager, C. Tang, Y. Qi, Euclidean and Hermitian LCD MDS codes, Designs, Codes and Cryptography 86 (11) (2018) 2605–2618.
[8] C. Li, Hermitian LCD codes from cyclic codes, Designs, Codes and Cryptography 86 (10) (2018) 2261–2278.
[9] M. Araya, M. Harada, K. Saito, Quaternary Hermitian linear complementary dual codes, IEEE Transactions on Information Theory 66 (5) (2019) 2751–2759.
[10] C. Carlet, S. Mesnager, C. Tang, Y. Qi, R. Pellikaan, Linear codes over Fq are equivalent to LCD codes for q > 3, IEEE Transactions on Information Theory 64 (4) (2018) 3010–3017.
[11] S. Bouyuklieva, Optimal binary LCD codes, Designs, Codes and Cryptography 89 (11) (2021) 2445–2461.
[12] M. Harada, Construction of binary LCD codes, ternary LCD codes and quaternary Hermitian LCD codes, Designs, Codes and Cryptography 89 (10) (2021) 2295–2312.
[13] K. Ishizuka, K. Saito, Construction for both self-dual codes and LCD codes, arXiv preprint arXiv:2108.12544.
[14] K. Ishizuka, Construction of quaternary Hermitian LCD codes, Cryptography and Communications (2022) 1–13.
[15] S. Li, M. Shi, Improved lower and upper bounds for LCD codes, arXiv preprint arXiv:2206.04936.
[16] X. Liu, H. Liu, L. Yu, New binary and ternary LCD codes from matrix-product codes, Linear and Multilinear Algebra 70 (5) (2022) 809–823.
[17] M. Shi, N. Liu, F. Özbudak, P. Solé, Additive cyclic complementary dual codes over F4, Finite Fields and Their Applications 83 (2022) 102087.
[18] A. R. Calderbank, E. M. Rains, P. W. Shor, N. J. A. Sloane, Quantum error correction via codes over GF(4), IEEE Transactions on Information Theory 44 (4) (1998) 1369–1387.
[19] H. Xu, W. Du, Constructions of symplectic LCD MDS codes, Bulletin of the Malaysian Mathematical Sciences Society 44 (5) (2021) 3377–3390.
[20] Xia Huang, Jin Li, Constructions of symplectic LCD MDS codes from quasi-cyclic codes, Advances in Mathematics of Communications 16 (4) (2022) 779–790.
[21] X.-H. Yang, J. L. Massey, The condition for a cyclic code to have a complementary dual, Discrete Mathematics 126 (1994) 391–393.
[22] M. Esmaeili, S. Yari, On complementary-dual quasi-cyclic codes, Finite Fields and Their Applications 15 (2009) 375–386.
[23] C. Güneri, B. Özkaya, P. Solé, Quasi-cyclic complementary dual codes, Finite Fields and Their Applications 42 (2016) 67–80.
[24] A. Saleh, M. Esmaeili, On complementary dual quasi-twisted codes, Journal of Applied Mathematics and Computing 56 (1) (2018) 115–129.
[25] W. Bosma, J. Cannon, C. Playoust, The Magma algebra system I: The user language, Journal of Symbolic Computation 24 (3-4) (1997) 235–265.
[26] W. C. Huffman, V. Pless, Fundamentals of error-correcting codes, Cambridge University Press, U.K., 2003.
[27] W. C. Huffman, J.-L. Kim, P. Solé, Concise Encyclopedia of Coding Theory, Chapman and Hall/CRC, 2021.
[28] C. Galindo, F. Hernando, R. Matsumoto, Quasi-cyclic constructions of quantum codes, Finite Fields and Their Applications 52 (2018) 261–280.
[29] J. Lv, R. Li, J. Wang, Quantum codes derived from one-generator quasi-cyclic codes with Hermitian inner product, International Journal of Theoretical Physics 59 (1) (2020) 300–312.
[30] M. Grassl, Bounds on the minimum distance of linear codes and quantum codes, online available at http://www.codetables.de, accessed on 2022.12.30.
diff --git a/v9FKT4oBgHgl3EQf4i7d/content/tmp_files/2301.11934v1.pdf.txt b/v9FKT4oBgHgl3EQf4i7d/content/tmp_files/2301.11934v1.pdf.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c3ea0716a9b8a82626ca27d8a6e77de90797cda0
--- /dev/null
+++ b/v9FKT4oBgHgl3EQf4i7d/content/tmp_files/2301.11934v1.pdf.txt
@@ -0,0 +1,769 @@
Machine learning assisted discovery of exotic criticality in a planar quasicrystal
Doruk Efe Gökmen,1 Sounak Biswas,2 Sebastian D.
Huber,1 Zohar Ringel,3 Felix Flicker,4 and Maciej Koch-Janusz5,6
1Institute for Theoretical Physics, ETH Zurich, 8093 Zurich, Switzerland
2Institut für Theoretische Physik und Astrophysik, Universität Würzburg, 97074 Würzburg, Germany
3Hebrew University, Racah Institute of Physics, Jerusalem, 9190401, Israel
4School of Physics and Astronomy, Cardiff University, Cardiff CF24 3AA, United Kingdom
5Department of Physics, University of Zurich, 8057 Zurich, Switzerland
6James Franck Institute, The University of Chicago, Chicago, Illinois 60637, USA

Our understanding of universality and phase transitions is deeply rooted in the notion of scaling. Indeed continuous phase transitions typically exhibit scale-invariant behavior facilitating the use of standard renormalization group (RG) techniques. Some critical systems, however, evade full scale invariance, in that observables reproduce themselves only under a set of discrete scale factors δ^n. Such discrete scale invariance (DSI) presents a conceptual challenge as many of our theoretical tools fail to be directly applicable. Here, we report on a discovery of emergent degrees of freedom for the recently studied classical dimer model on the quasiperiodic Ammann-Beenker tiling. Using a machine learning assisted approach we establish that their statistics can be expressed in terms of emergent large-scale super-dimers. Moreover, the result reveals an emergent discrete scale invariance, where the same dimer problem is re-appearing at successive discrete coarse-grained levels, demonstrating proximity to an RG fixed point. Our findings not only provide a rare example of successfully applying RG to a strongly-correlated system on a two-dimensional quasicrystal, but, owing to the generality of the approach, delineate a new paradigm in analysis and a practical tool for discovering coarse-grained representations in quasiperiodic and other non-homogeneous systems.

Introduction – The study of critical phenomena has been a major driving force in condensed matter physics. It spurred the discovery of the renormalization group (RG) [1–4] and of conformal field theories [5–7]. It also underlies the classification of topological states of matter via their gapless boundaries [8, 9].

An important currently unfolding development in the theory of critical phenomena is the study of strongly correlated systems on quasicrystals (QC). The self-similar structure of such systems paired with the lack of translation symmetry make an RG treatment both appropriate, and at the same time far from straightforward. To wit, though tailored and largely limited to 1D, RG methods [10, 11] nevertheless reveal new types of critical points in the context of many-body localization [11–13]. In 2D the critical Sutherland-Kalugin-Katz wavefunctions [14, 15] of tight-binding Hamiltonians provide a stepping stone towards correlated physics on QCs. From a phenomenological perspective, several QC critical systems [10, 16, 17] show evidence of non-conformal critical points with discrete (DSI), rather than the usual continuous, scale invariance (see Fig. 1). DSI has been found in non-equilibrium scenarios [17–19]; non-conformal critical points, more generally, have been suggested [20] as a resolution of the discrepancies between numerics [21] and experiments, particularly on the lambda-point anomaly [22].
Defying the intuition that quasiperiodicity is often irrelevant in an RG sense [23, 24], these observations indicate that its interplay with strong interactions provides a path towards novel critical phenomena. One system suspected of harbouring such an exotic type of criticality consists of classical dimers on the Ammann-Beenker (AB) tiling, a 2D bipartite quasicrystal with a recursive structure and a 'forbidden' octagonal symmetry [16]. The dimers themselves are an abstraction of resonant valence bonds [25] arising from strong correlations in quantum antiferromagnets. Recently, some of us proved the existence of defect-free dimer coverings on the AB tiling and reported Monte Carlo (MC) evidence for quasi long-ranged correlations [16]. An analytical account of the ensemble described by these dimer coverings, in particular of its potentially critical nature, remained outstanding. This is due to the complex structure of correlations, and lack of understanding regarding the relevant degrees of freedom (DOF) driving this critical behaviour.

FIG. 1. Conformal invariance and discrete scale invariance. (a) On a regular lattice, continuous field variables emerge from discrete degrees of freedom as the lattice becomes irrelevant under coarse graining. (b) In quasicrystals, self-similarity and strong correlations may conspire to keep the coarse-grained degrees of freedom discrete and the graph structure relevant upon zooming out.

The lack of applicable RG methods and the high dimensional configuration space of the problem naturally inspire the use of machine learning (ML). Despite impressive results [26–34], ML has yet to establish itself as a guide to theorizing about unexplored systems.

Here, we demonstrate such a development. We leverage analytical results reformulating RG in the language of formal compression theory [35], and a numerical algorithm employing contrastive learning to execute these ideas in regular lattices [36, 37]. We extend these tools to quasiperiodic systems (in fact, systems on arbitrary static graphs), and apply them to the AB dimer problem, obtaining qualitatively new theoretical results.

Our algorithm explicitly constructs the effective DOF in large patches of the system. The mapping is local, and turns out to depend on a linear function of microscopic dimer occupations. It reveals them to be clock variables, perfectly compatible with the hierarchical structure of the AB tiling (hosting, e.g., Z8 variables in 8-fold symmetric patches). Moreover, the nearby clock variables are strongly correlated: they align with one of their neighbours, locking the pair into an effective dimer at a large scale. These emergent "super-dimers" obey an approximate effective dimer exclusion principle, in effect yielding a system close to the original AB dimer model, but at a larger scale. The stability of this picture across scales strongly suggests proximity of the original system to an RG fixed point, and an emergent discrete scale invariance of the critical theory. In a parallel work, some of us provide a microscopic interpretation of these emergent super-dimers as certain alternating dimer paths on the AB lattice, and study the criticality numerically [38].

The system – The Ammann-Beenker (AB) construction gives quasiperiodic tilings of the plane utilizing two distinct plaquettes: a rhombus and a square [39].
Like their more famous cousins, the Penrose tilings [40], AB tilings feature diffraction patterns exhibiting crystallographically 'forbidden' symmetries, here 8-fold [41]. Likewise, they can also be generated by a recursive procedure in which an inflation map σ acts on a small seed patch by decomposing the constituent plaquettes as shown in Fig. 2b, and subsequently rescaling all the edge lengths by the silver ratio δ. A special role is played by 8-fold coordinated vertices: under inflations all lower coordinated vertices ultimately become (and stay) 8-vertices. Each 8-vertex is characterised by an order, i.e. the maximal number of inverse deflations σ^−1 after which it still remains 8-fold coordinated. Intuitively, the order of an 8-vertex specifies the maximal size of the local patch centered on it, within which the lattice appears perfectly 8-fold symmetric. The quasiperiodic AB lattice is thus invariant under discrete rescalings. This invariance is easily visualized for even order deflations σ^2n by drawing a super-lattice connecting 8-fold vertices (Fig. 2a).

FIG. 2. Self-similarity of the AB tiling, and the coarse graining blocks. (a) A microscopic dimer configuration (small black links) on the AB tiling's edges, with an overlaid AB superlattice, self-similar to the microscopic one. The effective DOF at a supervertex of a given (colour coded) valence will be obtained by coarse graining the dimer configuration in the surrounding region V of a shape dictated by the inflation rules and shown as a polygon of a matching colour. (b) The inflation (deflation) σ^2 (σ^−2) of the elementary rhombi and squares generating the tiling, with parts of the polygonal domains indicated in colour. Coarse graining all such polygonal patches executes a deflation σ^−2 of the original AB lattice, yielding the superlattice shown.

Dimer models enjoy a deceptively simple definition: the microscopic dimers live on the links of (any) lattice, which can be either occupied or empty. The key element is a set of hard local constraints: at every vertex where the links meet, one and only one of the links is occupied. This gives rise to a surprisingly rich phenomenology. Dimer models on regular lattices have been studied extensively, in part due to their relevance to high-Tc superconductivity [42], but have since been shown to support topological order and fractionalisation [43, 44] and exotic critical points [45]. The quantum and classical versions are closely related. The latter not only is a starting point for the quantum version [46, 47], but is important in its own right, with deep connections to combinatorics [48–50] and the study of random surfaces [51, 52].
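To make the hard constraint on dimer coverings concrete, here is a minimal check of ours (not the authors' code; the square plaquette below is merely a stand-in for an AB patch) that a set of occupied links is a valid dimer covering:

```python
from collections import Counter

def is_dimer_covering(vertices, edges, occupied):
    """Hard dimer constraint: every vertex is touched by exactly one occupied link."""
    edge_set = {frozenset(e) for e in edges}
    assert all(frozenset(e) in edge_set for e in occupied), "occupied links must be lattice edges"
    touches = Counter()
    for u, v in occupied:
        touches[u] += 1
        touches[v] += 1
    return all(touches[w] == 1 for w in vertices)

# Toy example: a single square plaquette with vertices 0-3.
vertices = [0, 1, 2, 3]
edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
print(is_dimer_covering(vertices, edges, [(0, 1), (2, 3)]))  # True: a perfect matching
print(is_dimer_covering(vertices, edges, [(0, 1), (1, 2)]))  # False: vertex 1 covered twice, vertex 3 not at all
```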
FIG. 3. Finding effective clock variables. (a) Coarse graining transformation Λ mapping Monte Carlo configurations in V into bitstrings H on supervertices of the σ^−2 deflated tiling. (b, f) The length of the bitstring H8(3) is determined by the saturation of mutual information at 4 (2) bits at 8(3)-supervertices. Each bit Hi is decided by the sign of the linear transformation Λi · V. The respective optimal filters Λ in (c, g) carry a representation of the local spatial symmetries of the corresponding supervertices, namely C8 and mirror. (d, h) The probability distributions P(H8(3)) sparsely occupy the space of codes, and form abstract clock variables. (e) Particularly, H8 forms a closed 8-loop, where each state has exactly two neighbours with Hamming distance 1. (i) Transitions between adjacent clock-states are induced by the representations of the local symmetries on the filters, enabling the identification of abstract clock-states with spatial directions along the links of the quasiperiodic lattice (see main text).

Recent work has begun to explore the interplay of (strongly-correlated) dimer physics and quasiperiodicity. Particularly, AB tilings, in contrast to Penrose tilings [53], host perfectly matched dimer configurations in the thermodynamic limit (i.e. with a vanishing density of defects), and numerically computed dimer correlations exhibit a quasi power-law decay with a complex spatial structure [16]. Moreover, the combinatorial proof of perfect matching pointed to a hierarchy of self-similar effective matching problems at different scales between spatial regions bounded by 'pseudomembranes', i.e. collections of edges which collectively host exactly one dimer.

Taken together, these facts suggest a conjecture that not only the AB tilings themselves, but crucially also the physics of the dimers on the AB tilings, exhibit discrete scale invariance [16] – a potentially striking and unusual example of the relevance of quasiperiodicity for the critical behaviour. A proof, and a microscopic physical mechanism at the level of the dimer ensemble, was however absent.

The putative criticality naturally calls for a renormalisation group (RG) analysis. Alas, RG approaches for quasiperiodic systems in D ≥ 2 dimensions are in their infancy and, in particular, to the best of our knowledge no such tools are available for the AB dimer system.

Results – To solve this challenge we employ the recent results on a formal correspondence between lossy compression theory and real-space RG [35]: the relevant operators of the theory, supported in a local spatial patch V, emerge as variational solutions to a suitably posed information bottleneck problem [54] (see Appendix A). Intuitively, they are compressions of the subsystem V which preserve the most information about its environment E. While previously only discussed for regular lattices, we note here that this holds on any static graph, in particular for quasiperiodic lattices, and thus it provides a theoretical avenue to define an RG procedure for such systems.

An efficient approximate numerical realization of this approach on regular lattices was introduced by some of us as the RSMI-NE algorithm [36, 37]. Here, we extend it to arbitrary static graphs [55]. Keeping the implementation details to Appendix A, we directly apply it to the AB dimer system. We address, in turn, two key questions: what are the local effective DOF, and what are their correlations. This is systematically revealed by the analysis of data provided by our algorithm.
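The coarse graining step itself is cheap once the filters are known: as described in the Fig. 3 caption, each bit of the code is the sign of one linear filter applied to the block configuration. A schematic sketch of ours (random stand-in filters in place of the trained ones, a hypothetical block size, and one common ±1 convention for the occupations; none of this is the paper's exact preprocessing):

```python
import numpy as np

def encode(v, filters):
    """Map a block of 0/1 dimer occupations v to a binary code H: bit i = sign of Lambda_i . v."""
    spins = 2 * v - 1                              # represent occupations as +/-1 before filtering
    return tuple(int(f @ spins > 0) for f in filters)

rng = np.random.default_rng(0)
n_links, n_bits = 20, 4                            # hypothetical block with 20 links, 4-bit code
filters = rng.normal(size=(n_bits, n_links))       # in the paper these are trained, not random
v = rng.integers(0, 2, size=n_links)
print(encode(v, filters))                          # a 4-bit code for this configuration
```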
FIG. 4. Emergent dimer exclusion rule and self-similar dimer-dimer correlations across scales. (a) The probability distribution of microscopic (i.e. δ^0) dimers (in greyscale) on an AB patch, conditioned on one of the links (in orange) hosting a dimer. (b, c) First two columns: the probabilities P(H|H3) of the emergent clock variables on the δ^2 and δ^4 superlattice (in greyscale), conditioned on two distinct states of one of the 3-clocks (in orange). The third column shows distributions conditioned on a state of the central 8-clock. Binding of adjacent clock variables into super-dimers obeying dimer exclusion constraints is revealed by sharply peaked conditional distributions. The effective super-dimers also reproduce longer-range dimer-dimer correlations at both the δ^2 and δ^4 scales. (d, e) Examples of (a single component of) the optimal coarse-graining filters producing the central 8-state clock variable at scales δ^2 and δ^4. The latter comprises 2760 microscopic links.

To uncover the emergent DOFs, we need to specify the spatial partition for the blocks V first. In the AB tiling there are natural choices, set by the recursive structure of the AB lattice itself [56]. At each scale, the AB tiling can be covered by four classes of blocks [16], shown in Fig. 2 in different colours, each deflating to vertices of differing connectivity in the super-lattice.

In each inequivalent class, the algorithm identifies the emergent DOF as a Zn clock variable, with n the connectivity, or class, of V in the superlattice. This is revealed as follows: the variational compression map Λ assigns to an MC dimer configuration V a short binary code H (Fig. 3a), the bits being set by applying individual components of Λ to V (itself a long bitstring of dimer occupations in the block). Each component is a priori a general nonlinear map, parametrized by a neural network, whose output is finally binarized.

The length of the code is not supplied, but found, by sequentially increasing the number of components in Λ, and training the compression of V to optimally preserve the mutual information with its environment E. Crucially, the maximal retained information about E plateaus, with the optimal code-length depending on the class of V. Particularly, for V in class-8 the optimal number of components is four, while for class-3 only two (Fig. 3b,f). Further, nonlinearity of the Λ networks does not improve compression: the same amount of information is preserved with only linear components. Optimal linear maps on the space of dimer configurations on V are shown for classes 8 and 3 in Figs. 3c and g, respectively. We note that RSMI-NE training is unsupervised.

To unravel the physical content of these encodings, we further query the RSMI-NE outputs. The code statistics in Fig. 3d reveal striking features: of the sixteen 4-bit codes in class-8 only eight are ever assigned, with half of the codes unused. Yet in Fig. 3b a 3-bit encoding, which has exactly eight available codes, is suboptimal. Moreover, the frequencies of all class-8 codes used are the same (3d), while for class-3 only two frequencies are identical (3h). These puzzling results indicate that RSMI-NE finds structure beyond merely the number of states of the DOF, which is essential to correlations with E, and which cannot be encoded with fewer bits.

We thus investigate the codes, and the Λ maps. We first note that the 4-bit codes form a closed 8-cycle, with neighbours differing by a single bit-flip, and each code having exactly two 1-bit distant neighbours (Fig. 3e) [57]. The uniform frequencies and the cyclic structure of the code hint at a symmetry.

Indeed, a class-8 patch V of the AB lattice is locally symmetric under π/4 rotations. We observe that under such rotations the components of the optimal Λ map in Fig. 3c change as (Λ1, Λ2, Λ3, Λ4) → (Λ4, −Λ3, −Λ1, −Λ2), which is a representation of a gen-
Similar analysis +can be performed for other classes of V, which have a +mirror symmetry. In particular, under its action for the +class-3 patch in Fig.3g we have (Λ1, Λ2) → (Λ2, Λ1), ex- +plaining equal frequency of the 01 and 10 codes. Hence, +we conclude that, rather than becoming continuous, the +emergent DOFs of the dimer system at σ2 scale remain +discrete, and mimic the local symmetry of the underlying +super-lattice. This holds equally at σ4 scale, providing +the first indication of a discrete scale invariance. Hav- +ing found the emergent DOFs in each class V individu- +ally, we turn to their correlations, where discrete scale +invariance manifests itself fully. To this end we simul- +taneously coarse grain dimer configurations in multiple +blocks, which collectively form an AB superlattice as in +Fig.2a, using the trained compression maps (Fig.3c,g). +As noted before, the number of states of each emer- +gent DOF equals the connectivity of the supervertex it +lives on. Since the distribution of each state’s frequen- +cies reflects the underlying superlattice symmetry, these +internal DOFs can be identified with spatial orientations +along the edges of the superlattice. For example, since +mirror symmetry w.r.t. the axis connecting the 8- and +3-vertices in Fig.3i relates the code frequencies of the 3- +vertex codes 01 and 10 (Fig.3h), the remaining state 11 +is the one pointing towards the 8-vertex. +We probe the correlations by conditioning on the state +of one of the vertices. In Fig.4b,c, fragments of σ−2 and +σ−4 superlattices are shown, with the state of the con- +ditioning variable, identified with a direction, in orange, +while the conditional distribution of DOFs at the other +vertices in greyscale. +Remarkably, this distribution is +very strongly correlated, effectively forcing occupation of +some states, and excluding others. To wit, when the 3- +vertex DOF points towards the 8-vertex, the distribution +P(H|H3) of the latter is sharply peaked in the matching +direction, while no other neighbour of the 3-vertex points +towards it (allowing, for example, the identification of the +8-vertex code 1011 with a spatial orientation in Fig.3i). +Conversely, when the 3-vertex DOF points towards one +of its other neighbours, it is “matched” by it, while the +8-vertex DOF distribution has zero weight precisely and +only towards that 3-vertex. +Examining all such correlations we arrive at a strik- +ing conclusion: +the effective DOFs in V’s throughout +the lattice are paired with one and only one of their +neighbours into emergent “super-dimers” on the edges of +the superlattice. The exclusion of certain clock variable +orientations in Figs.3(a-e) is a precise reflection of the +hard dimer-constraints, which these super-dimers obey. +Moreover, comparison of further correlations to those of +the microscopic dimers in Fig.4a reveals that not just +the local-dimer constraints, but also longer-range cor- +relations are reproduced correctly. +The physics of the +microscopic dimer model on the AB lattice is thus repli- +cated to a high degree of accuracy at the δ2 scale, and +again, at the δ4 scale (where ‘locking’ is even sharper, see +Fig.4c), thereby demonstrating DSI across three scales. +The quasiperiodicity of the AB lattice and the strong +interactions of the dimer model conspire to re-create self- +similar DOF at a higher scale, giving rise to discrete scale +invariance (Fig.1), which we uncover guided by the out- +puts of the RSMI-NE algorithm. 
We emphasize the dual computational and conceptual aspect of this result: each compression map Λ at the σ4 scale is a highly structured function of approximately 10³ microscopic dimer occupations (∼ 2^1000 configurations), effectively impossible to guess or analyze by hand alone, and yet providing sharp and concise physical insights about DOFs, symmetries, and correlations. We have, in effect, reached a point where ML techniques can not only assist, but facilitate progress in theoretical physics.

Our approach provides a roadmap for unravelling universal behaviour, extending RG methods or more broadly performing dimensional reduction in settings where configuration spaces with complex topology appear. We expect this to be of importance to the study of quasicrystals, more general inhomogeneous systems such as metallic glasses, and biological networks.

Acknowledgements – D.E.G. and S.D.H. gratefully acknowledge financial support from the Swiss National Science Foundation and the NCCR QSIT. S.B. acknowledges support by the European Research Council under the European Union Horizon 2020 Research and Innovation Programme via Grant Agreement No. 804213-TMCS. Z.R. acknowledges support from ISF grant 2250/19. M.K.-J. gratefully acknowledges financial support from the European Union's Horizon 2020 programme under Marie Skłodowska-Curie Grant Agreement No. 896004 (COMPLEX ML).

[1] Leo P. Kadanoff, “Scaling laws for Ising models near Tc,” Phys. Phys. Fiz. 2, 263–272 (1966).
[2] Kenneth G. Wilson, “Renormalization Group and Critical Phenomena. I. Renormalization Group and the Kadanoff Scaling Picture,” Phys. Rev. B 4, 3174–3183 (1971).
[3] Kenneth G. Wilson, “Renormalization Group and Critical Phenomena. II. Phase-Space Cell Analysis of Critical Behavior,” Phys. Rev. B 4, 3184–3205 (1971).
[4] Kenneth G. Wilson, “The renormalization group: Critical phenomena and the Kondo problem,” Rev. Mod. Phys. 47, 773–840 (1975).
[5] A. A. Belavin, A. M. Polyakov, and A. B. Zamolodchikov, “Infinite conformal symmetry in two-dimensional quantum field theory,” Nuclear Physics B 241, 333–380 (1984).
[6] Joseph Polchinski, “Scale and conformal invariance in quantum field theory,” Nuclear Physics B 303, 226–236 (1988).
[7] John Cardy, “Conformal Field Theory and Statistical Mechanics,” in Les Houches Summer School: Session 89: Exact Methods in Low-Dimensional Statistical Physics and Quantum Computing (2008), arXiv:0807.3472 [cond-mat.stat-mech].
[8] Xie Chen, Yuan-Ming Lu, and Ashvin Vishwanath, “Symmetry-protected topological phases from decorated domain walls,” Nature Communications 5, 3507 (2014).
[9] Andreas P. Schnyder, Shinsei Ryu, Akira Furusaki, and Andreas W. W. Ludwig, “Classification of topological insulators and superconductors in three spatial dimensions,” Phys. Rev. B 78, 195125 (2008).
[10] Utkarsh Agrawal, Sarang Gopalakrishnan, and Romain Vasseur, “Universality and quantum criticality in quasiperiodic spin chains,” Nature Communications 11, 2225 (2020).
[11] P. J. D. Crowley, A. Chandran, and C. R. Laumann, “Quasiperiodic quantum Ising transitions in 1D,” Phys. Rev. Lett. 120, 175702 (2018).
[12] Nicolas Macé, Nicolas Laflorencie, and Fabien Alet, “Many-body localization in a quasiperiodic Fibonacci chain,” SciPost Physics 6, 050 (2019).
[13] F. Setiawan, Dong-Ling Deng, and J. H. Pixley, “Transport properties across the many-body localization transition in quasiperiodic and random systems,” Phys. Rev. B 96, 104205 (2017).
[14] Bill Sutherland, “Self-similar ground-state wave function for electrons on a two-dimensional Penrose lattice,” Phys. Rev. B 34, 3904–3909 (1986).
[15] Nicolas Macé, Anuradha Jagannathan, Pavel Kalugin, Rémy Mosseri, and Frédéric Piéchon, “Critical eigenstates and their properties in one- and two-dimensional quasicrystals,” Phys. Rev. B 96, 045138 (2017).
[16] Jerome Lloyd, Sounak Biswas, Steven H. Simon, S. A. Parameswaran, and Felix Flicker, “Statistical mechanics of dimers on quasiperiodic Ammann-Beenker tilings,” Phys. Rev. B 106, 094202 (2022).
[17] Grace M. Sommers, Michael J. Gullans, and David A. Huse, “Self-dual quasiperiodic percolation,” arXiv:2206.11290 [cond-mat.stat-mech] (2022).
[18] Didier Sornette, “Discrete-scale invariance and complex dimensions,” Physics Reports 297, 239–270 (1998).
[19] Jeremy T. Young, Alexey V. Gorshkov, Michael Foss-Feig, and Mohammad F. Maghrebi, “Nonequilibrium fixed points of coupled Ising models,” Phys. Rev. X 10, 011039 (2020).
[20] Jonas F. Karcher, Noah Charles, Ilya A. Gruzberg, and Alexander D. Mirlin, “Generalized multifractality at spin quantum Hall transition,” Annals of Physics 435, 168584 (2021), special issue on Philip W. Anderson.
[21] Shai M. Chester, Walter Landry, Junyu Liu, David Poland, David Simmons-Duffin, Ning Su, and Alessandro Vichi, “Carving out OPE space and precise O(2) model critical exponents,” Journal of High Energy Physics 2020, 1–52 (2020).
[22] J. A. Lipa, D. R. Swanson, J. A. Nissen, T. C. P. Chui, and U. E. Israelsson, “Heat capacity and thermal relaxation of bulk helium very near the lambda point,” Phys. Rev. Lett. 76, 944–947 (1996).
[23] J. M. Luck, “A classification of critical phenomena on quasi-crystals and other aperiodic structures,” Europhysics Letters 24, 359–364 (1993).
[24] Ronaldo N. Araújo and Eric C. Andrade, “Conventional superconductivity in quasicrystals,” Phys. Rev. B 100, 014510 (2019).
[25] P. W. Anderson, “The Resonating Valence Bond State in La2CuO4 and Superconductivity,” Science 235, 1196–1198 (1987).
[26] J. Jumper et al., “Highly accurate protein structure prediction with AlphaFold,” Nature 596, 583–589 (2021).
[27] Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia, “Learning to simulate complex physics with graph networks,” in Proceedings of the 37th International Conference on Machine Learning, Proceedings of Machine Learning Research, Vol. 119, edited by Hal Daumé III and Aarti Singh (PMLR, 2020) pp. 8459–8468.
[28] Frank Noé, Alexandre Tkatchenko, Klaus-Robert Müller, and Cecilia Clementi, “Machine learning for molecular simulation,” Annual Review of Physical Chemistry 71, 361–390 (2020).
[29] E. D. Cubuk, S. S. Schoenholz, J. M. Rieser, B. D. Malone, J. Rottler, D. J. Durian, E. Kaxiras, and A. J. Liu, “Identifying structural flow defects in disordered solids using machine-learning methods,” Phys. Rev. Lett. 114, 108001 (2015).
[30] Giacomo Torlai, Guglielmo Mazzola, Juan Carrasquilla, Matthias Troyer, Roger Melko, and Giuseppe Carleo, “Neural-network quantum state tomography,” Nature Physics 14, 447–450 (2018).
[31] Marin Bukov, Alexandre G. R. Day, Dries Sels, Phillip Weinberg, Anatoli Polkovnikov, and Pankaj Mehta, “Reinforcement learning in different phases of quantum control,” Phys. Rev. X 8, 031086 (2018).
[32] Thomas Fösel, Petru Tighineanu, Talitha Weiss, and Florian Marquardt, “Reinforcement learning with neural networks for quantum feedback,” Phys. Rev. X 8, 031084 (2018).
[33] Denis Boyda, Gurtej Kanwar, Sébastien Racanière, Danilo Jimenez Rezende, Michael S. Albergo, Kyle Cranmer, Daniel C. Hackett, and Phiala E. Shanahan, “Sampling using SU(N) gauge equivariant flows,” Phys. Rev. D 103, 074504 (2021).
[34] Giuseppe Carleo, Ignacio Cirac, Kyle Cranmer, Laurent Daudet, Maria Schuld, Naftali Tishby, Leslie Vogt-Maranto, and Lenka Zdeborová, “Machine learning and the physical sciences,” Rev. Mod. Phys. 91, 045002 (2019).
[35] Amit Gordon, Aditya Banerjee, Maciej Koch-Janusz, and Zohar Ringel, “Relevance in the renormalization group and in information theory,” Phys. Rev. Lett. 126, 240601 (2021).
[36] Doruk Efe Gökmen, Zohar Ringel, Sebastian D. Huber, and Maciej Koch-Janusz, “Statistical physics through the lens of real-space mutual information,” Phys. Rev. Lett. 127, 240603 (2021).
[37] Doruk Efe Gökmen, Zohar Ringel, Sebastian D. Huber, and Maciej Koch-Janusz, “Symmetries and phase diagrams with real-space mutual information neural estimation,” Phys. Rev. E 104, 064106 (2021).
[38] S. Biswas and S. A. Parameswaran, “Discrete scale invariant fixed point in a classical quasiperiodic dimer model,” (2023).
[39] B. Grünbaum and G. C. Shephard, Tilings and Patterns (W. H. Freeman and Company, 1986).
[40] R. Penrose, “The role of aesthetics in pure and applied mathematical research,” Bull. Inst. Math. Appl. 10, 266–271 (1974).
[41] Marjorie Senechal, Quasicrystals and Geometry (Cambridge University Press, 1996).
[42] Daniel S. Rokhsar and Steven A. Kivelson, “Superconductivity and the quantum hard-core dimer gas,” Phys. Rev. Lett. 61, 2376–2379 (1988).
[43] R. Moessner and S. L. Sondhi, “Resonating Valence Bond Phase in the Triangular Lattice Quantum Dimer Model,” Phys. Rev. Lett. 86, 1881–1884 (2001).
[44] R. Moessner, S. L. Sondhi, and Eduardo Fradkin, “Short-ranged resonating valence bond physics, quantum dimer models, and Ising gauge theories,” Phys. Rev. B 65, 024504 (2001).
[45] T. Senthil, Ashvin Vishwanath, Leon Balents, Subir Sachdev, and Matthew P. A. Fisher, “Deconfined quantum critical points,” Science 303, 1490–1494 (2004).
[46] Fabien Alet, Jesper Lykke Jacobsen, Grégoire Misguich, Vincent Pasquier, Frédéric Mila, and Matthias Troyer, “Interacting Classical Dimers on the Square Lattice,” Phys. Rev. Lett. 94, 235702 (2005).
[47] Fabien Alet, Yacine Ikhlef, Jesper Lykke Jacobsen, Grégoire Misguich, and Vincent Pasquier, “Classical dimers with aligning interactions on the square lattice,” Phys. Rev. E 74, 041124 (2006).
[48] Richard Kenyon and Andrei Okounkov, “What is a dimer?” Notices of the AMS 52 (2005).
[49] P. W. Kasteleyn, “The statistics of dimers on a lattice: I. The number of dimer arrangements on a quadratic lattice,” Physica 27, 1209–1225 (1961).
[50] Henry Cohn, Richard Kenyon, and James Propp, “A variational principle for domino tilings,” Journal of the American Mathematical Society 14, 297–346 (2001).
[51] Richard Kenyon, Andrei Okounkov, and Scott Sheffield, “Dimers and amoebae,” Annals of Mathematics 163, 1019–1056 (2006).
[52] Richard Kenyon and Andrei Okounkov, “Limit shapes and the complex Burgers equation,” Acta Math. 199, 263–303 (2007).
[53] Felix Flicker, Steven H. Simon, and S. A. Parameswaran, “Classical Dimers on Penrose Tilings,” Phys. Rev. X 10 (2020).
[54] N. Tishby, F. C. Pereira, and W. Bialek, “The information bottleneck method,” in Proceedings of the 37th Allerton Conference on Communication, Control and Computation, Vol. 49 (2001).
[55] Doruk Efe Gökmen, Zohar Ringel, Sebastian D. Huber, and Maciej Koch-Janusz, “RSMI-NE/RSMI-NE,” (2021-2023).
[56] A. Jagannathan, “Quantum spins and quasiperiodicity: A real space renormalization group approach,” Phys. Rev. Lett. 92, 047202 (2004).
[57] Interestingly, this solves the four-dimensional ‘coil in the box’ problem familiar from coding theory.
[58] Maciej Koch-Janusz and Zohar Ringel, “Mutual information, neural networks and the renormalization group,” Nature Physics 14, 578–582 (2018).
[59] Patrick M. Lenggenhager, Doruk Efe Gökmen, Zohar Ringel, Sebastian D. Huber, and Maciej Koch-Janusz, “Optimal renormalization group transformation from information theory,” Phys. Rev. X 10, 011037 (2020).
[60] NetworkX developers, “NetworkX: Network Analysis in Python,” (2014-2023).

Supplemental materials

Appendix A: Methods

a. Real-space mutual information based coarse-graining – The method used to construct the effective degrees of freedom (DOF) is an extension of the RSMI approach first introduced by some of the authors in [58]. A system of microscopic DOFs is described by a (large-dimensional) random variable X distributed according to some joint probability distribution P(X). A coarse-graining (CG) of a partition X = ⊔i Vi into new variables X′ = ⊔i Hi is then defined as a conditional probability distribution P(X′|X) = ∏i PΛi(Hi|Vi), where X′ = ⊔i Hi and the product is over the individual coarse-grainings of the block variables Vi ↦ Hi. We note here that there is a distinction between the spatial patch V (part of the lattice), denoted with regular font, and the configuration of DOFs supported on this patch (a random variable) V, denoted with calligraphic font. For brevity we used a single notation in the main text, as the correct meaning is implied by the context. Maximisation of the real-space mutual information (RSMI)

IΛ(H : E) = E(H,E)[log PΛ(H, E) − log PΛ(H)P(E)]

between H and its distant environment E (here E(H,E) denotes the expectation over the joint distribution) provides a variational principle for the CG map Λ to distill the most relevant long-range features [35, 59]. The information capacity of H is compressed by the constraint of a predetermined number of bits, thereby providing an approximation of the information bottleneck problem [35].

This computationally difficult variational principle can nevertheless be efficiently implemented with differentiable parametric lower bounds on mutual information. Such bounds are parametrised by deep NNs, and optimised simultaneously with the parameters Λ of the coarse-graining using stochastic gradient descent. This is the RSMI-NE algorithm, which some of the authors introduced recently [36, 37].

Here we extended this formalism and the RSMI-NE package to systems on arbitrary static graphs by casting the configurations into vectors according to the fixed coordinate system defined by the graph. The new graph-enabled RSMI-NE code using the NetworkX backend [60] is available publicly [55].
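As an illustration of this graph-based vectorisation, the following minimal sketch (schematic, using a toy stand-in graph rather than an actual Ammann-Beenker patch, and not the published RSMI-NE code [55]) fixes an edge ordering of a NetworkX graph once, and then casts any dimer configuration, i.e. a set of occupied edges, into a binary vector of fixed length.

import networkx as nx
import numpy as np

# Toy stand-in graph; in the actual application this would be a finite patch
# of the Ammann-Beenker tiling built from the inflation rules.
G = nx.random_regular_graph(3, 14, seed=0)

# Fix the "coordinate system" once: a canonical ordering of the graph's edges.
edge_index = {frozenset(e): i for i, e in enumerate(G.edges())}

def dimer_config_to_vector(matching):
    # Cast a dimer configuration (a set of occupied edges) into a fixed-length
    # binary vector, so that configurations on an arbitrary static graph can be
    # fed to the same neural estimators as regular-lattice data.
    v = np.zeros(len(edge_index), dtype=np.float32)
    for edge in matching:
        v[edge_index[frozenset(edge)]] = 1.0
    return v

# Example: a matching produced, e.g., by a Monte Carlo update (here a stand-in).
print(dimer_config_to_vector(nx.maximal_matching(G)))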
b. Ansatz – We used PΛ(H|V) with an inner-product ansatz H := sign(Λ · V), parametrised by a linear NN Λ. Furthermore, for mapping binary variables we used an annealed Gumbel-softmax reparametrisation with exponential relaxation rate 5 × 10⁻³. The critic function in the variational RSMI lower-bound is implemented using a separable architecture

f(H, E) = u(H)ᵀ v(E),

where u and v are two-layer deep NNs with hidden dimension 16 and output dimension 8 (the latter is contracted in the inner product of the two networks). We trained the neural networks using stochastic gradient descent with learning rate 10⁻³. (A schematic code sketch of this setup is given at the end of this supplement.)

The coarse-grained block variable V at a given scale δn is defined on the σn-inflated tiles V shown with different colours in Fig.2a. The environment regions E are defined as a shell with radius given by a fixed graph-distance from the centre of V. In particular, for δ2, E is defined by an inner radius LEi = 9 and an outer radius LEo = 24, whereas for δ4 we used LEi = 40, LEo = 64, as shown in Fig.S1. In Fig.S2 we show examples of the corresponding σ−4 coarse-graining filters optimised for these regions.

Appendix B: The odd scales

Our analysis of the coarse-graining transformations of the dimer model on the AB tiling did not find evidence for a discrete scale invariant description in terms of super-dimer variables under all rescalings, but only for even-order ones (i.e. under deflations σ−2k). This is in contrast to the AB tiling itself (i.e. just the AB lattice), which is invariant under any order of deflation.

In addition, our method finds quantitatively and qualitatively distinct behaviour at odd orders σ−1 and σ−3. The maximal mutual information IΛ(H : E) attained for the coarse graining at a 3-supervertex is non-monotonic, exhibiting, within error, two distinct values characterizing the even and odd scales (with the odd scales' information reduced by almost a factor of two), as shown in Fig.S3.

Furthermore, the optimised coarse graining does not yield a well-defined three-state clock variable at odd scales. Indeed, for even scales the optimisation robustly yields a well-defined set of three clusters (corresponding to the three clock states) even in the distribution of pre-activations Λ · V, while at odd scales the distribution of pre-activations lacks any such clear structure. We emphasize that this is not an optimization issue: computationally, σ−4 coarse graining is a more challenging problem than σ−3 (because of the much larger size of the random variables involved).

FIG. S1. Block and environment regions. Highlighted in blue are examples of the coarse-graining blocks V, and their annular environment regions E used for the 3- (a) and 8-vertices (b) at the largest scale considered (i.e. δ4). The microscopic lattice, and the σ−2 superlattice are shown. The centers of the “kite” and “star” shaped regions V are at the 8-vertices whose positions form the σ−4 superlattice.

FIG. S2. Optimal σ−4 coarse-graining transformations. (a) 8-supervertex filters. (b) 3-supervertex filters.

FIG. S3. Mutual information across different scale transformations. The maximal MI for the coarse graining at a 3-supervertex (IΛ(H : E) in bits, plotted against the order of the scale transformation). For odd-order rescaling transformations σ−1,−3, the information attained by the compression is systematically lower compared to the even ones σ−2,−4.
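The following is a schematic sketch of the training setup described in Appendix A: a linear filter Λ, a separable critic f(H, E) = u(H)ᵀ v(E) with two-layer networks of hidden dimension 16 and output dimension 8, and stochastic gradient descent with learning rate 10⁻³. It is not the published RSMI-NE code [55]: it is written in PyTorch, uses an InfoNCE-style bound as one generic example of the differentiable mutual-information lower bounds mentioned above, replaces the annealed Gumbel-softmax with a simple tanh relaxation of the sign, and runs on random stand-in data with hypothetical sizes.

import math
import torch
import torch.nn as nn

torch.manual_seed(0)
n_links, n_env, n_bits, emb = 56, 512, 4, 8   # hypothetical sizes

# Linear coarse-graining filters Lambda; tanh(beta * x) is a smooth stand-in
# for the annealed Gumbel-softmax relaxation of sign(Lambda . V) used above.
Lam = nn.Linear(n_links, n_bits, bias=False)
def coarse_grain(v, beta=5.0):
    return torch.tanh(beta * Lam(v))

# Separable critic f(H, E) = u(H)^T v(E): two-layer networks with hidden
# dimension 16 and output dimension 8, matching the ansatz described above.
u_net = nn.Sequential(nn.Linear(n_bits, 16), nn.ReLU(), nn.Linear(16, emb))
v_net = nn.Sequential(nn.Linear(n_env, 16), nn.ReLU(), nn.Linear(16, emb))

params = list(Lam.parameters()) + list(u_net.parameters()) + list(v_net.parameters())
opt = torch.optim.SGD(params, lr=1e-3)

def mi_lower_bound(h, e):
    # An InfoNCE-style variational lower bound on I(H : E) over a batch.
    scores = u_net(h) @ v_net(e).T                  # (batch, batch) critic values
    joint = scores.diag().mean()                    # aligned (block, environment) pairs
    marginal = torch.logsumexp(scores, dim=1).mean() - math.log(h.shape[0])
    return joint - marginal

for step in range(200):
    # Random stand-ins for the dimer occupations of the block V and environment E;
    # in the actual procedure these come from the Monte Carlo sampler.
    v_block = torch.randint(0, 2, (128, n_links)).float()
    e_env = torch.randint(0, 2, (128, n_env)).float()
    loss = -mi_lower_bound(coarse_grain(v_block), e_env)
    opt.zero_grad()
    loss.backward()
    opt.step()

Maximising the bound (minimising the loss) trains the filters Λ and the critic networks jointly, as described above; the trained Λ is then used for the coarse-graining and code analysis of the main text.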
+ diff --git a/v9FKT4oBgHgl3EQf4i7d/content/tmp_files/load_file.txt b/v9FKT4oBgHgl3EQf4i7d/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..b92b64428a88edc7a55fc32e5f794bfed005cb14 --- /dev/null +++ b/v9FKT4oBgHgl3EQf4i7d/content/tmp_files/load_file.txt @@ -0,0 +1,544 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf,len=543 +page_content='Machine learning assisted discovery of exotic criticality in a planar quasicrystal Doruk Efe G¨okmen,1 Sounak Biswas,2 Sebastian D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Huber,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='1 Zohar Ringel,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3 Felix Flicker,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='4 and Maciej Koch-Janusz5,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 6 1Institute for Theoretical Physics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' ETH Zurich,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 8093 Zurich,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Switzerland 2Institut f¨ur Theoretische Physik und Astrophysik,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Universit¨at W¨urzburg,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 97074 W¨urzburg,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Germany 3Hebrew University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Racah Institute of Physics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Jerusalem,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 9190401,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Israel 4School of Physics and Astronomy,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Cardiff University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Cardiff CF24 3AA,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' United Kingdom 5Department of Physics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 
University of Zurich,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 8057 Zurich,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Switzerland 6James Franck Institute,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The University of Chicago,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Chicago,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Illinois 60637,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' USA Our understanding of universality and phase transitions is deeply rooted in the notion of scaling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Indeed continuous phase transitions typically exhibit scale-invariant behavior facilitating the use of standard renormalization group (RG) techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Some critical systems, however, evade full scale invariance, in that observables reproduce themselves only under a set of discrete scale factors δn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Such discrete scale invariance (DSI) presents a conceptual challenge as many of our theoretical tools fail to be directly applicable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Here, we report on a discovery of emergent degrees of freedom for the recently studied classical dimer model on the quasiperiodic Ammann-Beenker tiling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Using a machine learning assisted approach we establish that their statistics can be expressed in terms of emergent large-scale super-dimers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Moreover, the result reveals an emergent discrete scale invari- ance, where the same dimer problem is re-appearing at successive discrete coarse-grained levels, demonstrating proximity to an RG fixed point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Our findings not only provide a rare example of successfully applying RG to a strongly-correlated system on a two-dimensional quasicrystal, but, owing to the generality of the approach, delineate a new paradigm in analysis and a practical tool for discovering coarse-grained representations in quasiperiodic and other non-homogeneous systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Introduction – The study of critical phenomena has been a major driving force in condensed matter physics.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' It spurred the discovery of the renormalization group (RG) [1–4] and of conformal field theories [5–7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' It also underlies the classification of topological states of matter via their gapless boundaries [8, 9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' An important currently unfolding development in the theory of critical phenomena is the study of strongly cor- related systems on quasicrystals (QC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The self-similar structure of such systems paired with the lack of trans- lation symmetry make an RG treatment both appro- priate, and at the same time far from straightforward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' To wit, though tailored and largely limited to 1D, RG methods [10, 11] nevertheless reveal new types of criti- cal points in the context of many-body localization [11– 13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In 2D the critical Sutherland-Kalugin-Katz wave- functions [14, 15] of tight binding Hamiltonians provide a stepping stone towards correlated physics on QCs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' From a phenomenological perspective, several QC critical sys- tems [10, 16, 17] show evidence of non-conformal critical points with discrete (DSI), rather than the usual contin- uous scale invariance (see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' DSI has been found in non-equilibrium scenarios [17–19];' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' non-conformal crit- ical points, more generally, have been suggested [20] as a resolution of the discrepancies between numer- ics [21], and experiments, particularly on the lambda- point anomaly [22].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Defying the intuition that quasiperiodicity is often ir- relevant in an RG sense [23, 24], these observations indi- cate that its interplay with strong interactions provides a path towards novel critical phenomena.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' One system suspected of harbouring such an exotic type of critical- ity consists of classical dimers on the Ammann-Beenker FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Conformal invariance and discrete scale in- variance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (a) On a regular lattice, continuous field variables emerge from discrete degrees of freedom as the lattice be- comes irrelevant under coarse graining.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (b) In quasicrystals, self-similarity and strong correlations may conspire to keep the coarse-grained the degrees of freedom discrete and graph structure relevant upon zooming out.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (AB) tiling, a 2D bipartite quasicrystal with a recursive structure and a ‘forbidden’ octagonal symmetry [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The dimers themselves are an abstraction of resonant valence bonds [25] arising from strong correlations in quantum antiferromagnets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Recently, some of us proved the ex- istence of defect-free dimer coverings on the AB tiling arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='11934v1 [cond-mat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='stat-mech] 27 Jan 2023 2 and reported Monte Carlo (MC) evidence for quasi long ranged correlations [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' An analytical account of the en- semble described by these dimer coverings, in particular of its potentially critical nature, remained outstanding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This is due to the complex structure of correlations, and lack of understanding regarding the relevant degrees of freedom (DOF) driving this critical behaviour.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The lack of applicable RG methods and the high di- mensional configuration space of the problem naturally inspire the use of machine learning (ML).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Despite im- pressive results [26–34], ML has yet to establish itself as a guide to theorizing about unexplored systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Here, we demonstrate such a development.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We lever- age analytical results reformulating RG in the language of formal compression theory [35], and a numerical al- gorithm employing contrastive learning to execute these ideas in regular lattices [36, 37].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We extend these tools to quasiperiodic systems (in fact, systems on arbitrary static graphs), and apply them to the AB dimer prob- lem, obtaining qualitatively new theoretical results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Our algorithm explicitly constructs the effective DOF in large patches of the system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The mapping is local, and turns out to depend on a linear function of microscopic dimer occupations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' It reveals them to be clock variables, perfectly compatible with the hierarchical structure of the AB tiling (hosting, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=', Z8 variables in 8-fold sym- metric patches).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Moreover, the nearby clock variables are strongly correlated: they align with one of their neigh- bours, locking the pair into an effective dimer at a large scale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' These emergent “super-dimers” obey an approxi- mate effective dimer exclusion principle, in effect yielding a system close to the original AB dimer model, but at a larger scale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The stability of this picture across scales strongly suggests proximity of the original system to an RG fixed point, and an emergent discrete scale invari- ance of the critical theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In a parallel work, some of us provide a microscopic interpretation of these emergent super-dimers as certain alternating dimer paths on the AB lattice, and study the criticality numerically [38].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The system – The Ammann-Beenker (AB) construc- tion gives quasiperiodic tilings of the plane utilizing two distinct plaquettes: a rhombus and a square [39].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Like their more famous cousins, the Penrose tilings [40], AB tilings feature diffraction patterns exhibiting crystallo- graphically ‘forbidden’ symmetries, here 8-fold [41].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Like- wise, they can also can be generated by a recursive pro- cedure in which an inflation map σ acts on a small seed patch by decomposing the constituent plaquettes as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='2b, and subsequently rescaling all the edge lengths by the silver ratio δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' A special role is played by 8- fold coordinated vertices: under inflations all lower coor- dinated vertices ultimately become (and stay) 8-vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Each 8-vertex is characterised by an order, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' the max- imal number of inverse deflations σ−1 after which it still remains 8-fold coordinated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Intuitively, the order of an 8-vertex specifies the maximal size of the local patch cen- tered on it, within which the lattice appears perfectly 8-fold symmetric.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The quasiperiodic AB lattice is thus FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Self-similarity of the AB tiling, and the coarse graining blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (a) A microscopic dimer configuration (small black links) on the AB tiling’s edges, with an overlaid AB superlattice, self-similar to the microscopic one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The effec- tive DOF at a supervertex of a given (colour coded) valence will be obtained by coarse graining the dimer configuration in the surrounding region V of a shape dictated by the infla- tion rules and shown as a polygon of a matching colour.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (b) The inflation (deflation) σ2(−2) of the elementary rhombi and squares generating the tiling, with parts of the polygonal do- mains indicated in colour.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Coarse graining all such polygonal patches executes a deflation σ−2 of the original AB lattice, yielding the superlattice shown.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' invariant under discrete rescalings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This invariance is easily visualized for even order deflations σ2n by drawing a super-lattice connecting 8-fold vertices (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='2a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Dimer models enjoy a deceptively simple definition: the microscopic dimers live on the links of (any) lat- tice, which can be either occupied or empty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The key element is a set of hard local constraints: at every ver- tex where the links meet, one and only one of the links is occupied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This gives rise to a surprisingly rich phe- nomenology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Dimer models on regular lattices have been studied extensively, in part due to their relevance to high- Tc superconductivity [42], but have since been shown to support topological order and fractionalisation [43, 44] and exotic critical points [45].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The quantum and classical versions are closely related.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The latter not only is a start- ing point for the quantum version [46, 47], but is impor- tant in its own right, with deep connections to combina- 3 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Finding effective clock variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (a) Coarse graining transformation Λ mapping Monte Carlo configurations in V into bitstrings H on supervertices of σ−2 deflated tiling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (b, f) The length of the bitstring H8(3) is determined by the saturation of mutual information at 4 (2) bits at 8(3)-supervertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Each bit Hi is decided by the sign of linear transformation Λi · V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The respective optimal filters Λ in (c, g) carry a representation of the local spatial symmetries of corresponding supervertices, namely C8 and mirror.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (d, h) The probability distributions P(H8(3)) sparsely occupy the space of codes, and form abstract clock variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (e) Particularly, H8 forms a closed 8-loop, where each state has exactly two neighbours with Hamming-distance 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (i) Transitions between adjacent clock-states are induced by the representations of the local symmetries on filters, enabling to identify abstract clock-states with spatial directions along the links of the quasiperiodic lattice (see main text).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' torics [48–50] and the study of random surfaces [51, 52].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Recent work has begun to explore the interplay of (strongly-correlated) dimer physics and quasiperiodic- ity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Particularly, AB tilings, in contrast to Penrose tilings [53], host perfectly matched dimer configurations in the thermodynamic limit (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' with a vanishing density of defects), and numerically computed dimer correlations exhibit a quasi power-law decay with a complex spatial structure [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Moreover, the combinatorial proof of per- fect matching pointed to a hierarchy of self-similar effec- tive matching problems at different scales between spatial regions bounded by ‘pseudomembranes’, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' collections of edges which collectively host exactly one dimer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Taken together these facts suggest a conjecture that not only the AB tilings themselves, but crucially also the physics of the dimers on the AB tilings, exhibit discrete scale invariance [16] – a potentially striking and unusual example of the relevance of quasiperiodicity for the criti- cal behaviour.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' A proof, and a microscopic physical mech- anism at the level of the dimer ensemble was, however, absent.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The putative criticality naturally calls for a renormal- isation group (RG) analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Alas, RG approaches for quasiperiodic systems in D ≥ 2 dimensions are in their infancy and, in particular, to the best of our knowledge no such tools are available for the AB dimer system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Results – To solve this challenge we employ the re- cent results on a formal correspondence between lossy compression theory and real-space RG [35]: the relevant operators of the theory, supported in a local spatial patch V, emerge as variational solutions to a suitably posed information bottleneck problem [54] (see Appendix A).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Intuitively, they are compressions of the subsystem V, which preserve the most information about its environ- ment E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' While previously only discussed for regular lat- tices, we note here that this holds in any static graph, in particular for quasiperiodic lattices, and thus it provides a theoretical avenue to define an RG procedure for such systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' An efficient approximate numerical realization of this approach on regular lattices was introduced by some of us as the RSMI-NE algorithm [36, 37].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Here, we extend it to arbitrary static graphs [55].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Keeping the implemen- tation details to Appendix A, we directly apply it to the AB dimer system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We address, in turn, two key ques- tions: what are the local effective DOF, and what are their correlations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This is systematically revealed by the analysis of data provided by our algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' To uncover the emergent DOFs, we need to specify the spatial partition for the blocks V first.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In the AB tiling 4 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Emergent dimer exclusion rule and self-similar dimer-dimer correlations across scales.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (a) The probability distribution of microscopic (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' δ0) dimers (in greyscale) on an AB patch, conditioned on one of the links (in orange) hosting a dimer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (b, c) First two columns: the probabilities P(H|H3) of the emergent clock variables on the δ2 and δ4 superlattice (in greyscale), conditioned on two distinct states of one of the 3-clocks (in orange).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The third column shows distributions conditioned on a state of the central 8-clock.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Binding of adjacent clock variables into super-dimers obeying dimer exclusion constraints is revealed by sharply peaked conditional distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The effective super-dimers reproduce also longer-range dimer-dimer correlations at both δ2 and δ4 scales.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (d, e) Examples of (a single component of) optimal coarse-graining filters producing the central 8-state clock variable at scales δ2 and δ4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The latter comprises 2760 microscopic links.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' there are natural choices, set by the recursive structure of the AB lattice itself [56].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' At each scale, the AB tiling can be covered by four classes of blocks [16], shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='2 in different colours, each deflating to vertices of differing connectivity in the super-lattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In each inequivalent class, the algorithm identifies the emergent DOF as a Zn clock variable, with n the connec- tivity, or class, of V in the superlattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This is revealed as follows: the variational compression map Λ assigns to an MC dimer configuration V a short binary code H (Fig.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3a), the bits being set by applying individual com- ponents of Λ to V (itself a long bitstring of dimer occupa- tions in the block).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Each component is a priori a general nonlinear map, parametrized by a neural network, whose output is finally binarized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The length of the code is not supplied, but found, by sequentially increasing the number of components in Λ, and training the compression of V to optimally preserve the mutual information with its environment E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Crucially, the maximal retained information about E plateaus with the optimal code-length depending on the class of V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Particularly, for V in class-8 the optimal number of components is four, while for class-3 only two (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3b,f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Further, nonlinearity of Λ networks does not improve compression: the same amount of information is preserved with only linear components.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Optimal lin- ear maps on the space of dimer configurations on V are shown for classes 8 and 3 in Figs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3c and g, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We note that RSMI-NE training is unsupervised.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' To unravel the physical content of these encodings, we further query the RSMI-NE outputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The code statis- tics in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3d reveal striking features: of the sixteen 4-bit codes in class-8 only eight are ever assigned, with half of the codes unused.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Yet in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3b a 3-bit encoding, which has exactly eight available codes, is suboptimal.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' More- over, the frequencies of all class-8 codes used are the same (3d), while for class-3 only two frequencies are identi- cal (3h).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' These puzzling results indicate that RSMI-NE finds structure beyond merely the number of states of the DOF, which is essential to correlations with E, and which cannot be encoded with fewer bits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We thus investigate the codes, and the Λ maps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We first note that the 4-bit codes form a closed 8-cycle, with neighbours differing by a single bit-flip, and each code having exactly two 1-bit distant neighbours (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3e) [57].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The uniform frequencies and the cyclic structure of the code hint at a symmetry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Indeed, a class-8 patch V of the AB lattice is lo- cally symmetric under π/4 rotations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We observe that under such rotations the components of the op- timal Λ map in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3c change as (Λ1, Λ2, Λ3, Λ4) → (Λ4, −Λ3, −Λ1, −Λ2), which is a representation of a gen- 5 erator of the cyclic group C8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We emphasize that it is the compression map, and consequently the emergent DOF now carrying a representation of what is a priori a (lo- cal) symmetry only of the AB lattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Similar analysis can be performed for other classes of V, which have a mirror symmetry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In particular, under its action for the class-3 patch in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='3g we have (Λ1, Λ2) → (Λ2, Λ1), ex- plaining equal frequency of the 01 and 10 codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Hence, we conclude that, rather than becoming continuous, the emergent DOFs of the dimer system at σ2 scale remain discrete, and mimic the local symmetry of the underlying super-lattice.' 
This conclusion holds equally at the σ^4 scale, providing the first indication of a discrete scale invariance. Having found the emergent DOFs in each class V individually, we turn to their correlations, where discrete scale invariance manifests itself fully. To this end we simultaneously coarse grain dimer configurations in multiple blocks, which collectively form an AB superlattice as in Fig. 2a, using the trained compression maps (Fig. 3c,g). As noted before, the number of states of each emergent DOF equals the connectivity of the supervertex it lives on. Since the distribution of each state’s frequencies reflects the underlying superlattice symmetry, these internal DOFs can be identified with spatial orientations along the edges of the superlattice. For example, since mirror symmetry w.r.t. the axis connecting the 8- and 3-vertices in Fig. 3i relates the code frequencies of the 3-vertex codes 01 and 10 (Fig. 3h), the remaining state 11 is the one pointing towards the 8-vertex. We probe the correlations by conditioning on the state of one of the vertices. In Fig. 4b,c, fragments of the σ^-2 and σ^-4 superlattices are shown, with the state of the conditioning variable, identified with a direction, in orange, and the conditional distribution of DOFs at the other vertices in greyscale.
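The conditioning described here is a plain empirical estimate. A minimal Python sketch follows, assuming the coarse-grained samples are stored as dictionaries mapping supervertex labels to code strings (a hypothetical data layout used only for illustration, not the paper's actual one):

from collections import Counter

def conditional_distribution(samples, cond_vertex, cond_state, target_vertex):
    """Empirical P(H_target | H_cond = cond_state) from coarse-grained samples.

    samples: iterable of dicts {vertex_label: code_string}, one per
             coarse-grained dimer configuration.
    """
    counts = Counter(
        s[target_vertex] for s in samples if s[cond_vertex] == cond_state
    )
    total = sum(counts.values())
    return {code: n / total for code, n in counts.items()} if total else {}

# Usage (labels are illustrative): distribution of the neighbouring 8-vertex
# when the 3-vertex points towards it.
# p = conditional_distribution(samples, cond_vertex="v3", cond_state="11",
#                              target_vertex="v8")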
Remarkably, this distribution is very strongly correlated, effectively forcing occupation of some states, and excluding others. To wit, when the 3-vertex DOF points towards the 8-vertex, the distribution P(H|H_3) of the latter is sharply peaked in the matching direction, while no other neighbour of the 3-vertex points towards it (allowing, for example, the identification of the 8-vertex code 1011 with a spatial orientation in Fig. 3i). Conversely, when the 3-vertex DOF points towards one of its other neighbours, it is “matched” by it, while the 8-vertex DOF distribution has zero weight precisely and only towards that 3-vertex. Examining all such correlations we arrive at a striking conclusion: the effective DOFs in V's throughout the lattice are paired with one and only one of their neighbours into emergent “super-dimers” on the edges of the superlattice. The exclusion of certain clock variable orientations in Figs. 3(a-e) is a precise reflection of the hard dimer-constraints, which these super-dimers obey. Moreover, comparison of further correlations to those of the microscopic dimers in Fig. 4a reveals that not just the local-dimer constraints, but also longer-range correlations are reproduced correctly. The physics of the microscopic dimer model on the AB lattice is thus replicated to a high degree of accuracy at the δ^2 scale, and again at the δ^4 scale (where ‘locking’ is even sharper, see Fig. 4c), thereby demonstrating DSI across three scales. The quasiperiodicity of the AB lattice and the strong interactions of the dimer model conspire to re-create self-similar DOF at a higher scale, giving rise to discrete scale invariance (Fig. 1), which we uncover guided by the outputs of the RSMI-NE algorithm.
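Because the graph-enabled pipeline already relies on NetworkX, a natural consistency check of the super-dimer picture is to verify that, in each coarse-grained sample, the pointing DOFs pair every supervertex with exactly one neighbour, i.e. that they define a valid dimer cover of the superlattice graph. The sketch below is a hypothetical check of this kind (the map code_to_neighbour, from a vertex's code to the neighbour it points at, is assumed given):

import networkx as nx

def is_super_dimer_cover(superlattice, states, code_to_neighbour):
    """True if the emergent DOFs pair each supervertex with exactly one
    neighbour, i.e. the 'pointing' relation is mutual and covers every vertex.

    superlattice:       nx.Graph of supervertices.
    states:             dict {vertex: code_string} for one coarse-grained sample.
    code_to_neighbour:  callable (vertex, code) -> neighbouring vertex pointed at.
    """
    matched = {}
    for v, code in states.items():
        u = code_to_neighbour(v, code)
        if u not in superlattice[v]:          # must point along an existing edge
            return False
        matched[v] = u
    # hard dimer constraint: pointing must be mutual and cover every vertex once
    return all(matched.get(u) == v for v, u in matched.items()) and \
        set(matched) == set(superlattice.nodes)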
We emphasize the dual computational and conceptual aspect of this result: each compression map Λ at the σ^4 scale is a highly structured function of approximately 10^3 microscopic dimer occupations (∼ 2^(10^3) configurations), effectively impossible to guess or analyze by hand only, and yet providing sharp and concise physical insights about DOFs, symmetries, and correlations. We have, in effect, reached a point where ML techniques can not only assist, but facilitate progress in theoretical physics. Our approach provides a roadmap for unravelling universal behaviour, extending RG methods or more broadly performing dimensional reduction in settings where configuration spaces with complex topology appear. We expect this to be of importance to the study of quasicrystals, more general inhomogeneous systems such as metallic glasses, and biological networks.
Acknowledgements – D.E.G. and S.D.H. gratefully acknowledge financial support from the Swiss National Science Foundation and the NCCR QSIT. S.B. acknowledges support by the European Research Council under the European Union Horizon 2020 Research and Innovation Programme via Grant Agreement No. 804213-TMCS. Z.R.
acknowledges support from ISF grant 2250/19. M.K.-J. gratefully acknowledges financial support from the European Union’s Horizon 2020 programme under Marie Sklodowska-Curie Grant Agreement No. 896004 (COMPLEX ML).
[1] Leo P. Kadanoff, “Scaling laws for Ising models near T_c,” Phys. Phys. Fiz. 2, 263–272 (1966).
[2] Kenneth G. Wilson, “Renormalization Group and Critical Phenomena. I. Renormalization Group and the Kadanoff Scaling Picture,” Phys. Rev. B 4, 3174–3183 (1971).
[3] Kenneth G. Wilson, “Renormalization Group and Critical Phenomena. II. Phase-Space Cell Analysis of Critical Behavior,” Phys.
Rev. B 4, 3184–3205 (1971).
[4] Kenneth G. Wilson, “The renormalization group: Critical phenomena and the Kondo problem,” Rev. Mod. Phys. 47, 773–840 (1975).
[5] A. A. Belavin, A. M. Polyakov, and A. B. Zamolodchikov, “Infinite conformal symmetry in two-dimensional quantum field theory,” Nuclear Physics B 241, 333–380 (1984).
[6] Joseph Polchinski, “Scale and conformal invariance in quantum field theory,” Nuclear Physics B 303, 226–236 (1988).
[7] John Cardy, “Conformal Field Theory and Statistical Mechanics,” in Les Houches Summer School: Session 89: Exact Methods in Low-Dimensional Statistical Physics and Quantum Computing (2008), arXiv:0807.3472 [cond-mat.stat-mech].
[8] Xie Chen, Yuan-Ming Lu, and Ashvin Vishwanath, “Symmetry-protected topological phases from decorated domain walls,” Nature Communications 5, 3507 (2014).
[9] Andreas P.
Schnyder, Shinsei Ryu, Akira Furusaki, and Andreas W. W. Ludwig, “Classification of topological insulators and superconductors in three spatial dimensions,” Phys. Rev. B 78, 195125 (2008).
[10] Utkarsh Agrawal, Sarang Gopalakrishnan, and Romain Vasseur, “Universality and quantum criticality in quasiperiodic spin chains,” Nature Communications 11, 2225 (2020).
[11] P. J. D. Crowley, A. Chandran, and C. R. Laumann, “Quasiperiodic quantum Ising transitions in 1d,” Phys. Rev. Lett. 120, 175702 (2018).
[12] Nicolas Macé, Nicolas Laflorencie, and Fabien Alet, “Many-body localization in a quasiperiodic Fibonacci chain,” SciPost Physics 6, 050 (2019).
[13] F. Setiawan, Dong-Ling Deng, and J. H.
Pixley, “Transport properties across the many-body localization transition in quasiperiodic and random systems,” Phys. Rev. B 96, 104205 (2017).
[14] Bill Sutherland, “Self-similar ground-state wave function for electrons on a two-dimensional Penrose lattice,” Phys. Rev. B 34, 3904–3909 (1986).
[15] Nicolas Macé, Anuradha Jagannathan, Pavel Kalugin, Rémy Mosseri, and Frédéric Piéchon, “Critical eigenstates and their properties in one- and two-dimensional quasicrystals,” Phys. Rev. B 96, 045138 (2017).
[16] Jerome Lloyd, Sounak Biswas, Steven H. Simon, S. A. Parameswaran, and Felix Flicker, “Statistical mechanics of dimers on quasiperiodic Ammann-Beenker tilings,” Phys. Rev. B 106, 094202 (2022).
[17] Grace M. Sommers, Michael J.
Gullans, and David A. Huse, “Self-dual quasiperiodic percolation,” arXiv e-prints, arXiv:2206.11290 (2022) [cond-mat.stat-mech].
[18] Didier Sornette, “Discrete-scale invariance and complex dimensions,” Physics Reports 297, 239–270 (1998).
[19] Jeremy T. Young, Alexey V. Gorshkov, Michael Foss-Feig, and Mohammad F. Maghrebi, “Nonequilibrium fixed points of coupled Ising models,” Phys. Rev. X 10, 011039 (2020).
[20] Jonas F. Karcher, Noah Charles, Ilya A. Gruzberg, and Alexander D. Mirlin, “Generalized multifractality at spin quantum Hall transition,” Annals of Physics 435, 168584 (2021), special issue on Philip W. Anderson.
[21] Shai M. Chester, Walter Landry, Junyu Liu, David Poland, David Simmons-Duffin, Ning Su, and Alessandro Vichi, “Carving out OPE space and precise O(2) model critical exponents,” Journal of High Energy Physics 2020, 1–52 (2020).
[22] J.
A. Lipa, D. R. Swanson, J. A. Nissen, T. C. P. Chui, and U. E. Israelsson, “Heat capacity and thermal relaxation of bulk helium very near the lambda point,” Phys. Rev. Lett. 76, 944–947 (1996).
[23] J. M. Luck, “A classification of critical phenomena on quasi-crystals and other aperiodic structures,” Europhysics Letters 24, 359–364 (1993).
[24] Ronaldo N. Araújo and Eric C. Andrade, “Conventional superconductivity in quasicrystals,” Phys. Rev. B 100, 014510 (2019).
[25] P.
W. Anderson, “The Resonating Valence Bond State in La2CuO4 and Superconductivity,” Science 235, 1196–1198 (1987).
[26] J. Jumper et al., “Highly accurate protein structure prediction with AlphaFold,” Nature 596, 583–589 (2021).
[27] Alvaro Sanchez-Gonzalez, Jonathan Godwin, Tobias Pfaff, Rex Ying, Jure Leskovec, and Peter Battaglia, “Learning to simulate complex physics with graph networks,” in Proceedings of the 37th International Conference on Machine Learning, Proceedings of Machine Learning Research, Vol. 119, edited by Hal Daumé III and Aarti Singh (PMLR, 2020), pp. 8459–8468.
[28] Frank Noé, Alexandre Tkatchenko, Klaus-Robert Müller, and Cecilia Clementi, “Machine learning for molecular simulation,” Annual Review of Physical Chemistry 71, 361–390 (2020).
[29] E. D. Cubuk, S. S. Schoenholz, J. M. Rieser, B. D. Malone, J. Rottler, D. J.
Durian, E. Kaxiras, and A. J. Liu, “Identifying structural flow defects in disordered solids using machine-learning methods,” Phys. Rev. Lett. 114, 108001 (2015).
[30] Giacomo Torlai, Guglielmo Mazzola, Juan Carrasquilla, Matthias Troyer, Roger Melko, and Giuseppe Carleo, “Neural-network quantum state tomography,” Nature Physics 14, 447–450 (2018).
[31] Marin Bukov, Alexandre G. R. Day, Dries Sels, Phillip Weinberg, Anatoli Polkovnikov, and Pankaj Mehta, “Reinforcement learning in different phases of quantum control,” Phys. Rev. X 8, 031086 (2018).
[32] Thomas Fösel, Petru Tighineanu, Talitha Weiss, and Florian Marquardt, “Reinforcement learning with neural networks for quantum feedback,” Phys. Rev. X 8, 031084 (2018).
[33] Denis Boyda, Gurtej Kanwar, Sébastien Racanière, Danilo Jimenez Rezende, Michael S. Albergo, Kyle Cranmer, Daniel C. Hackett, and Phiala E.
Shanahan, “Sampling using SU(N) gauge equivariant flows,” Phys. Rev. D 103, 074504 (2021).
[34] Giuseppe Carleo, Ignacio Cirac, Kyle Cranmer, Laurent Daudet, Maria Schuld, Naftali Tishby, Leslie Vogt-Maranto, and Lenka Zdeborová, “Machine learning and the physical sciences,” Rev. Mod. Phys. 91, 045002 (2019).
[35] Amit Gordon, Aditya Banerjee, Maciej Koch-Janusz, and Zohar Ringel, “Relevance in the renormalization group and in information theory,” Phys. Rev. Lett. 126, 240601 (2021).
[36] Doruk Efe Gökmen, Zohar Ringel, Sebastian D. Huber, and Maciej Koch-Janusz, “Statistical physics through the lens of real-space mutual information,” Phys. Rev. Lett. 127, 240603 (2021).
[37] Doruk Efe Gökmen, Zohar Ringel, Sebastian D. Huber, and Maciej Koch-Janusz, “Symmetries and phase diagrams with real-space mutual information neural estimation,” Phys. Rev.
E 104, 064106 (2021).
[38] S. Biswas and S. A. Parameswaran, “Discrete scale invariant fixed point in a classical quasiperiodic dimer model,” (2023).
[39] B. Grünbaum and G. C. Shephard, Tilings and Patterns (W. H. Freeman and Company, 1986).
[40] R. Penrose, “The role of aesthetics in pure and applied mathematical research,” Bull. Inst. Math. Appl. 10, 266–271 (1974).
[41] Marjorie Senechal, Quasicrystals and Geometry (Cambridge University Press, 1996).
[42] Daniel S. Rokhsar and Steven A. Kivelson, “Superconductivity and the quantum hard-core dimer gas,” Phys. Rev. Lett.
61, 2376–2379 (1988).
[43] R. Moessner and S. L. Sondhi, “Resonating Valence Bond Phase in the Triangular Lattice Quantum Dimer Model,” Phys. Rev. Lett. 86, 1881–1884 (2001).
[44] R. Moessner, S. L. Sondhi, and Eduardo Fradkin, “Short-ranged resonating valence bond physics, quantum dimer models, and Ising gauge theories,” Phys. Rev. B 65, 024504 (2001).
[45] T. Senthil, Ashvin Vishwanath, Leon Balents, Subir Sachdev, and Matthew P. A. Fisher, “Deconfined quantum critical points,” Science 303, 1490–1494 (2004).
[46] Fabien Alet, Jesper Lykke Jacobsen, Grégoire Misguich, Vincent Pasquier, Frédéric Mila, and Matthias Troyer, “Interacting Classical Dimers on the Square Lattice,” Phys. Rev. Lett.
94, 235702 (2005).
[47] Fabien Alet, Yacine Ikhlef, Jesper Lykke Jacobsen, Grégoire Misguich, and Vincent Pasquier, “Classical dimers with aligning interactions on the square lattice,” Phys. Rev. E 74, 041124 (2006).
[48] Richard Kenyon and Andrei Okounkov, “What is a dimer?” Notices of the AMS 52 (2005).
[49] P. W. Kasteleyn, “The statistics of dimers on a lattice: I. The number of dimer arrangements on a quadratic lattice,” Physica 27, 1209–1225 (1961).
[50] Henry Cohn, Richard Kenyon, and James Propp, “A variational principle for domino tilings,” Journal of the American Mathematical Society 14, 297–346 (2001).
[51] Richard Kenyon, Andrei Okounkov, and Scott Sheffield, “Dimers and amoebae,” Annals of Mathematics 163, 1019–1056 (2006).
[52] Richard Kenyon and Andrei Okounkov, “Limit shapes and the complex Burgers equation,” Acta Math. 199, 263–303 (2007).
[53] Felix Flicker, Steven H. Simon, and S. A. Parameswaran, “Classical Dimers on Penrose Tilings,” Phys.
Rev. X 10 (2020).
[54] N. Tishby, F. C. Pereira, and W. Bialek, “The information bottleneck method,” in Proceedings of the 37th Allerton Conference on Communication, Control and Computation, Vol. 49 (2001).
[55] Doruk Efe Gökmen, Zohar Ringel, Sebastian D. Huber, and Maciej Koch-Janusz, “RSMI-NE/RSMI-NE,” (2021-2023).
[56] A. Jagannathan, “Quantum spins and quasiperiodicity: A real space renormalization group approach,” Phys. Rev. Lett. 92, 047202 (2004).
[57] Interestingly, this solves the four-dimensional ‘coil in the box’ problem familiar from coding theory.
[58] Maciej Koch-Janusz and Zohar Ringel, “Mutual information, neural networks and the renormalization group,” Nature Physics 14, 578–582 (2018).
[59] Patrick M. Lenggenhager, Doruk Efe Gökmen, Zohar Ringel, Sebastian D.
Huber, and Maciej Koch-Janusz, “Optimal renormalization group transformation from information theory,” Phys. Rev. X 10, 011037 (2020).
[60] NetworkX developers, “NetworkX: Network Analysis in Python,” (2014-2023).

Supplemental materials

Appendix A: Methods

a. Real-space mutual information based coarse-graining – The method used to construct the effective degrees of freedom (DOF) is an extension of the RSMI approach first introduced by some of the authors in [58]. A system of microscopic DOFs is described by a (large dimensional) random variable X distributed according to some joint probability distribution P(X). A coarse-graining (CG) of a partition X = ∪_i V_i into new variables X' = ∪_i H_i is then defined as a conditional probability distribution P(X'|X) = ∏_i P_{Λ_i}(H_i|V_i), where X' = ∪_i H_i and where the product is over the individual CG of the block variables V_i ↦ H_i. We note here that there is a distinction between the spatial patch V (part of the lattice), denoted with regular font, and a configuration of DOFs supported on this patch (a random variable) V, denoted with calligraphic font. For brevity we used a single notation in the main text, as the correct meaning is implied by the context. Maximisation of the real-space mutual information (RSMI) I_Λ(H : E) = E_{(H,E)}[log P_Λ(H, E) − log P_Λ(H)P(E)] between H and its distant environment E provides a variational principle for the CG map Λ to distill the most relevant long-range features [35, 59]. The information capacity of H is compressed by the constraint of a predetermined number of bits, thereby providing an approximation of the information bottleneck problem [35].
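To make the blockwise structure of P(X'|X) concrete, the following minimal numpy sketch applies an independent map Λ_i to each block V_i of a configuration and collects the coarse-grained bits H_i. It is an illustration only, not the RSMI-NE code, and it assumes the deterministic linear ansatz described below (the occupation-to-spin mapping is an implementation choice of the sketch); the variational optimisation of the maps themselves is sketched after the next paragraph.

import numpy as np

def coarse_grain(config, blocks, Lambdas):
    """Apply the blockwise coarse-graining X -> X' = {H_i}.

    config:  1D array of microscopic dimer occupations (0/1) on the lattice.
    blocks:  list of index arrays; blocks[i] selects the sites of patch V_i.
    Lambdas: list of arrays; Lambdas[i] has shape (n_bits_i, len(blocks[i])).

    Each H_i = sign(Lambda_i . V_i), binarized to 0/1, mirroring the
    deterministic inner-product ansatz of the Methods.
    """
    spins = 2 * config - 1                       # map occupations {0,1} -> {-1,+1}
    coarse = []
    for idx, Lam in zip(blocks, Lambdas):
        h = np.sign(Lam @ spins[idx])            # one bit per row of Lambda_i
        coarse.append(((h + 1) // 2).astype(int))
    return coarse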
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This computationally difficult variational principle can nevertheless be efficiently implemented with differen- tiable parametric lower bounds on mutual information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Such bounds are parametrised by deep NN, and op- timised simultaneously with the parameters Λ of the coarse-graining using stochastic gradient descent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This is the RSMI-NE algorithm, which some of the authors introduced recently [36, 37].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Here we extended this formalism and the RSMI-NE package to systems on arbitrary static graphs by cast- ing the configurations into vectors according to the fixed coordinate system defined by the graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The new graph- enabled RSMI-NE code using the NetworkX backend [60] is available publicly [55].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Ansatz – We used PΛ(H|V) with an inner- product ansatz H := sign(Λ·V), parametrised by a linear NN Λ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Furthermore, for mapping binary variables we used annealed Gumbel-softmax reparametrisation with exponential relaxation rate 5 × 10−3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The critic function in the variational RSMI lower-bound is implemented us- ing a separable architecture f(H, E) = u(H)Tv(E) 8 where u and v are two-layer deep NNs with hidden di- mension 16 and output dimension 8 (the latter is con- tracted in the inner product of the two networks).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We trained the neural networks using stochastic gradient de- scent with learning rate 10−3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The coarse-grained block variable V at a given scale δn is defined on the σn inflated tiles V shown with different colours in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='2a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The environment regions E, are defined as a shell with radius given by a fixed graph-distance from the centre of V .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In particular for δ2, E is defined by an inner radius LEi = 9 and outer radius LEo = 24, whereas for δ4 we used LEi = 40, LEo = 64, as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='S1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='S2 we show examples of corresponding σ−4 coarse-graining filters optimised for these regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Appendix B: The odd scales Our analysis of the coarse graining transformations of the dimer model on the AB tiling did not find evidence for a discrete scale invariant description in terms of super- dimer variables under all rescalings, but only for even order ones (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' under deflations σ−2k).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' This is in contrast to the AB tiling itself (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' just the AB lattice), which is invariant under any order of deflation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' In addition, our method finds quantitatively and qual- itatively distinct behaviour at odd orders σ−1 and σ−3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The maximal mutual information IΛ(H : E) attained for the coarse graining at a 3-supervertex is non-monotonic, exhibiting, within error, two distinct values characteriz- ing the even and odd scales (with the odd scales’ infor- mation reduced by almost a factor of two),as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='S3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Furthermore, the optimised coarse graining does not yield a well-defined three-state clock variable at odd scales.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Indeed, for even scales the optimisation robustly yields a well-defined set of three clusters (correspond- ing to the three clock states) even in the distribution of pre-activations Λ · V, while at odd scales the distribu- tion of pre-activations lacks any such clear structure.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' We emphasize that this is not an optimization issue: com- putationally, σ−4 coarse graining is a more challenging problem than σ−3 (because of much large size of random variables involved).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 9 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' S1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Block and environment regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Highlighted in blue are examples of the coarse-graining blocks V , and their annular environment regions E used for the 3- (a) and 8-vertices (b) at the largest scale considered (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' δ4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The microscopic lattice, and the σ−2 superlattice are shown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The centers of the “kite” and “star” shaped regions V are at the 8-vertices whose positions form the σ−4 superlattice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' S2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Optimal σ−4 coarse-graining transformations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (a) 8-supervertex filters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' (b) 3-supervertex filters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' 10 1 2 3 4 scale transformation 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content='9 I ( : ) (bits) FIG.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' S3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' Mutual information across different scale transformations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' The maximal MI for the coarse graining at a 3-supervertex.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} +page_content=' For odd order rescaling transformations σ−1,−3, the information attained by the compression is sys- tematically lower compared to the even ones σ−2,−4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/v9FKT4oBgHgl3EQf4i7d/content/2301.11934v1.pdf'} diff --git a/vNAyT4oBgHgl3EQfafc7/content/2301.00242v1.pdf b/vNAyT4oBgHgl3EQfafc7/content/2301.00242v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4aa7506f9ef9ef2290730924bad2502f4c46eef9 --- /dev/null +++ b/vNAyT4oBgHgl3EQfafc7/content/2301.00242v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aab96e203e11a5435aeb31351421ce38aa6bde5daed301c146d2b8a75b2c250a +size 454419 diff --git a/vNAyT4oBgHgl3EQfafc7/vector_store/index.faiss b/vNAyT4oBgHgl3EQfafc7/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..d8040ae4d2e3971bcb3587da18a45b0d91278861 --- /dev/null +++ b/vNAyT4oBgHgl3EQfafc7/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46bd92f0cd1ad6e89321064e23b486e573b872f65d6d975db5d344b550786f77 +size 1310765 diff --git a/vNAyT4oBgHgl3EQfafc7/vector_store/index.pkl b/vNAyT4oBgHgl3EQfafc7/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..0b6c89788d1c9f248040c01177fcd918077b9dd4 --- /dev/null +++ b/vNAyT4oBgHgl3EQfafc7/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eda17e58ac4570fa3096392c50ea0a470cee5896db83ac63449fb86b6c9d386 +size 53610 diff --git a/vdE4T4oBgHgl3EQfXAw5/vector_store/index.faiss b/vdE4T4oBgHgl3EQfXAw5/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..5552e9628f19b207a733c7985e6755109b67fdd3 --- /dev/null +++ b/vdE4T4oBgHgl3EQfXAw5/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9355f1c13f1bedbdffa81ba93ac55d23bfcf497422ff464ff0b7246a05a76ded +size 2555949 diff --git a/vdE4T4oBgHgl3EQfXAw5/vector_store/index.pkl b/vdE4T4oBgHgl3EQfXAw5/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7d60c0bf0dd52dc499341512e4e0948ffdf75402 --- /dev/null +++ b/vdE4T4oBgHgl3EQfXAw5/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5945e3d7ce790d56a39fa4744a5f6e357c76cfdaa1fa74b94c2478d8d774ad6b +size 89663 diff --git a/vtAzT4oBgHgl3EQfB_rN/content/tmp_files/2301.00953v1.pdf.txt b/vtAzT4oBgHgl3EQfB_rN/content/tmp_files/2301.00953v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..f45418fde10deaf8a089ed68d75ce50518d89421 --- /dev/null +++ b/vtAzT4oBgHgl3EQfB_rN/content/tmp_files/2301.00953v1.pdf.txt @@ -0,0 +1,4091 @@ +Carrollian Yang-Mills Theory +Minhajul Islam +Indian Institute of Technology Kanpur, Kalyanpur, Kanpur 208016. INDIA. 
+E-mail: minhajul@iitk.ac.in
+Abstract: By performing a small c (speed of light) expansion of SU(N) Yang-Mills fields, we construct two different electric and two different magnetic sector actions of Carrollian Yang-Mills theory. For both the electric and the magnetic case, one sector contains a non-trivial self-interaction, while the other consists of N2 − 1 copies of the corresponding sector of the Carrollian abelian theory. In d = 4, all four sectors are invariant under the infinite-dimensional Carrollian conformal symmetry. No central extensions appear when the charge algebra is analyzed at the level of phase space. Lastly, we compute propagators for all four sectors and vertices for the two non-trivial sectors. The propagators in position space show ultra-local behavior.
+arXiv:2301.00953v1 [hep-th] 3 Jan 2023
+Contents
+1 Introduction 1
+2 Carrollian Conformal Algebra and Representation 4
+3 Yang-Mills action and small c-expansion 6
+4 Carrollian Yang-Mills actions 9
+4.1 Electric Action I 9
+4.2 Electric Action II 11
+4.3 Magnetic Action I 14
+4.4 Magnetic Action II 17
+5 Noether charges and Charge algebra 20
+6 Propagator and Vertices 21
+6.1 Electric Sector I 22
+6.2 Magnetic sector I 23
+6.3 Electric sector II 24
+6.4 Magnetic sector II 25
+6.5 Propagators in position space 27
+7 Conclusions and Discussions 28
+A Rotation and Boost invariance 29
+B Charge Algebra 31
+C Discussion on previous work on Carrollian Yang-Mills theory 33
+1 Introduction
+The construction of the spectacularly successful Standard Model of particle physics, which describes nature around us, is built on the foundation of relativistic quantum field theory (QFT). But, often, to describe real-life systems, it is desirable to look at approximations and limits of the more fundamental theory.
+Gauge theories are the backbone of theoretical physics. Three of the four fundamental forces of nature are explained by Yang-Mills theory. Even the first example of the most promising formalism to understand quantum gravity, the AdS/CFT holographic duality, is constructed using a supersymmetric version of Yang-Mills theory [1]. The AdS/CFT holographic duality relates a (d + 1)-dimensional gravitational theory to a d-dimensional field theory. More specifically, [1] connects a string theory living on five-dimensional Anti-de Sitter (AdS) spacetime (times a five-sphere) and N = 4 SU(N) Supersymmetric Yang-Mills (SYM) theory, which is a four-dimensional conformal field theory living on the boundary of AdS.
+In this paper, we will look at Yang-Mills theories from a different perspective. We will attempt to understand the theory in the limit when the speed of light goes to zero. The diametrically opposite limit, where c → ∞, is clearly of physical interest as it describes Galilean or non-relativistic (NR) physics, and is useful to describe a range of day-to-day physical systems like hydrodynamics. Below we clarify why the other limit, called the Carrollian limit, is important.
+If we adopt a group-theoretic approach to understand QFT in these two different (Galilean and Carrollian) limits, we would begin from the Poincaré algebra and take the large c (speed of light) limit and the small c limit. The two symmetry algebras obtained as a result are different: the familiar Galilean algebra and the not-so-familiar Carrollian algebra. In both these limits, many interesting counter-intuitive concepts emerge.
In both cases, spacetime metrics degenerate, light-cones open up for non- +relativistic theory and close up for Carrollian theory, and symmetry algebra gets enhanced. +Non-relativistic theories, corresponding to c → ∞, are important for condensed matter +physics, non-AdS holography, and hydrodynamics. In this limit, as mentioned previously +the metric degenerates, spacetime loses its Reimmanian structure, and a new spacetime +structure emerges called Newton-Cartan spacetime. Selected references on the construction +of non-relativistic field theories and related Newton-Cartan spacetime structures are [2– +6]. In connection with the construction of symmetries, one of the interesting techniques +to construct non-relativistic physics is to start from a Poincar´e invariant theory and do a +large c-expansion. Using this approach we get many interesting insights into non-relativistic +physics like order-wise enhanced symmetry algebra, and actions [6–9]. +Our main focus in this paper is the other limit corresponding to c → 0, which is +called the Carrollian limit. At first sight, sending the speed of light to zero may seem +unnatural and the expectation is that this would lead to unphysical models. But recently, +this particular limit has been resurgent with different applications, mainly connected to +the understanding of flat space holography [10]. As mentioned before, one of the most +promising tools to understand Quantum gravity is the AdS/CFT duality. In the limit of +infinite radius of curvature, AdS spacetime become flat spacetime. On the dual side, the +infinite radius limit corresponds to sending the speed of light to zero [11]. The boundary +theory thus becomes a Carrollian conformal field theory. Some important references for +holography for asymptotically flat spacetime are [10–21]. The understanding of flat space +holography recently has taken two different directions, viz. Celestial holography and Car- +rollian holography. Celestial holography relates gravity in 4d asymptotically flat spacetimes +to a 2d CFT living on the celestial sphere [22–24]. On the other hand, Carrollian hologra- +phy relates 4d asymptotically flat gravity to 3d Carrollian CFTs living on the entire null +– 2 – + +boundary of 4d bulk spacetime [15, 25–31]. Recently, some fascinating works have been +done to connect both formalisms [32, 33]. +The most successful example of AdS/CFT is the original Maldacena correspondence +relating N = 4 SU(N) Supersymmetric Yang-Mills theory in d = 4 to gravity in AdS5. +One of our long-term goals is to understand the flatspace version of the Maldacena corre- +spondence. As an important intermediate step, we wish to construct the Carrollian version +of Super-Yang-Mills theory. This is the main motivation for constructing Carrollian Yang- +Mills (CYM) theory and, in particular, actions for CYM in this paper. +Carrollian physics has also emerged in other interesting places and here we quickly +summarize these exciting developments. Carrollian structure appear on any null hyper- +surface. Every black hole solutions of general relativity contains a horizon that is nothing +but a null surface. Carrollian structures on black hole horizons have been considered in +[34]. Carrollian gravity may provide a tractable limit of general relativity and be useful +for various physical context. +This has been studied in [35–41]. +Carroll theory is also +important for cosmology, inflation [42], fluid mechanics [20, 39, 41, 43–47], fractons [48– +50], flat physics in condensed matter systems [51]. 
+Inspired by large c-expansion and +construction of non-relativistic physics, small c-expansion was introduced to understand +Carrollian physics in [52]. +Finally, the Carrollian limit of the string theory worldsheet +leads to the very high energy tensionless regime of strings. This has been investigated in +detail in [53–57]. Recently there has been some interesting work done on Carroll fermions +[51, 58–60]. +Before moving on to Carrollian gauge theories, which will be the focus in this paper, +we briefly recall previous works on Galilean gauge theories. Galilean gauge theory for U(1) +theory was first constructed long ago [61]. In [62–64] authors realized infinite-dimensional +Galilean conformal symmetry at the level of equations of motion in Galilean abelian and +Galilean Yang-Mills theory. Subsequently there is some detailed work on action construc- +tions for both Galilean abelian [65, 66] and Yang-Mills theory [67]. Quantum properties of +Galilean scalar electrodynamics were studied in [68] and that of Galilean QED in [69]. +The Carrollian algebra was first discussed in [70, 71]. More recently Carroll confor- +mal structures have been analyzed at the level equations of motion in [15, 27–29]. In [72] +Carrollian action was constructed for the so-called electric abelian theory, which is an in- +teracting field theory with scalar field [42, 73]. Using the small c-expansion, the magnetic +sector of Carrollian abelian theory has been recently constructed [42]. The conformal struc- +ture of this magnetic action was analyzed. In [74] authors constructed off-shell Carrollian +Yang-Mills theory in the Hamiltonian formulation. However, at present there is no action +formulation for the Carrollian Yang-Mills theory. +In this paper, we construct Carrollian Yang-Mills actions using the small c-expansion +technique. We find four different sectors of Carrollian Yang-Mills theory. This construction +depends on the power of c we consider during field expansion. All four sectors exhibit infi- +nite Carrollian conformal invariance in four spacetime dimensions. The energy-momentum +tensors for all four sectors are analyzed, and their conservation is established using equa- +– 3 – + +tions of motion and Bianchi identities. To see charge algebra, we calculate charges for all +the four sectors and show that the symmetry is realized at the level of charge algebra. +We begin our investigation of the quantum properties of the theory and calculate all the +propagators and vertices. A detailed quantum mechanical analysis is kept for future work. +Outline of the paper +The paper is organized as follows. We begin in Sec.2 with a review of Carrollian conformal +algebra (CCA). After that, we talk about an infinite extension of the CCA. +In Sec.3 we address relativistic Yang-Mills theory and its small c-expansion. We take +expansion of fields as Aa +µ = �∞ +n=0 cλc2nAa(n) +µ +, where λ is a non-negative constant parameter. +Using λ = 0, we get the electric and the magnetic sectors of CYM with a non-trivial term +or self-interaction term. For λ with any non-zero value, we get copies of the abelian electric +and the abelian magnetic sectors. Here for any non-zero value, we choose the lowest even +integer value two, which is explained in detail. +In Sec.4, we address details of all the sectors of CYM action. For each sector, firstly, +we give the action in a compact form, and write the equations of motion, and the gauge +symmetry. After that, we show its invariance under infinite CCA in four spacetime dimen- +sions. 
Finally, we analyze the energy-momentum tensor with its improved version and its +conversation. In Sec.5, we calculate Noether charges and check the charge algebra for these +actions. +In Sec.6 we briefly discuss Feynman rules for propagators and vertices for all the four +sectors along with the Feynman diagrams. In this section, we also talk about propagators +in position space. In Sec.7 we conclude with a summary of our results and a list of future +directions. +2 +Carrollian Conformal Algebra and Representation +The UR or Carrollian symmetry can be obtained by performing an In¨on¨u–Wigner con- +traction on the relativistic conformal generators. The corresponding contraction of the +spacetime coordinates for a d-dimensional CFT is described as +xi → xi, +t → ϵ t; +ϵ → 0 . +(2.1) +Here, i runs over the spatial coordinates i = 1, . . . , d − 1. +The above contraction can +also be interpreted as taking the limit of vanishing speed of light, c → 0. The Carrollian +generators are obtained by performing the space-time contraction on the parent relativistic +generators. For example, we obtain Carrollian boost generator +Brel +i += −xi∂t − t∂i +using (2.1) +−−−−−−→ −1 +ϵ xi∂t − t∂i +redefined Bi +−−−−−−−→ Bi = lim +ϵ→0 ϵBrel +i +Carroll +−−−−→ +limit +Bi = −xi∂t . +(2.2) +– 4 – + +The other Carrollian generators are also obtained by doing the analysis like above. They +are given by +H = ∂t, +Bi = −xi∂t, +Ki = −2xj(t∂t + xi∂i) + xjxj∂i, +K = xixi∂t, +(2.3a) +D = −(t∂t + xi∂i), +Pi = ∂i, +Jij = −(xi∂j − xj∂i) . +(2.3b) +These generate the finite Conformal Carrollian Algebra (f-CCA), which is iso(d, 1) for a +d-dimensional field theory [15, 27]: +[Jij, Bk] = δk[iBj], +[Jij, Pk] = δk[iPj], +[Jij, Kk] = δk[iKj], +[Bi, Pj] = δijH, +[Bi, Kj] = δijK, +[D, K] = −K, +[K, Pi] = 2Bi, +[Ki, Pj] = −2Dδij − 2Jij, +[H, Ki] = 2Bi, +[D, H] = H, +[D, Pi] = Pi, +[D, Ki] = −Ki. +(2.4) +The sub-algebra consisting of the generators {Jij, Bi, Pi, H} forms the c → 0 limit of the +Poincar´e algebra viz. the Carrollian algebra [75]. +Unlike the relativistic conformal algebra, even in dimensions greater than two, it is possible +to give the finite algebra in (2.4) an infinite-dimensional lift by introducing time translation +generator with arbitrary spatial dependence +Mf = f(xi)∂t . +(2.5) +Here, Mf generates the infinite set of super-translations. In the above expression f(xi) +is an arbitrary function of the spatial co-ordinates xi, which we restrict to polynomials. +We obtain the finite generators of f-CCA, i.e., Mf = H, Bi, K when f(xi) = 1, −xi, xkxk +respectively. The super-translation generators Mf along with the finite set of generators +{Bi, Jij, H, Pi, D, K, Ki} describe the infinite-dimensional CCA. For d ≥ 4 it can be written +as [15, 72]: +[Pi, Mf] = M∂if, +[D, Mf] = M(−xi∂if+f), +(2.6a) +[Ki, Mf] = M2xif+xkxk∂if−2xixk∂kf, +[Jij, Mf] = M−x[i∂j]f . +(2.6b) +For more details of the algebraic aspects of Carrollian conformal symmetry, the reader is +pointed to [15]. In this paper our focus is on spacetime dimension d = 4. +Representation theory +The representation theory of the CCA based on highest weights was first constructed in +[15]. Further analysis on representation extended to fields of different integer and half- +integer spins was given in [27]. For the CCA, the states are labeled with the eigenvalues +of rotation and dilatation generators. The construction of representation is summarized +below. 
+The Carrollian CFT fields are labeled with scaling dimension ∆ and spin j as +[D, Φ(0, 0)] = ∆Φ(0, 0), +[J2, Φ(0, 0)] = j(j + 1)Φ(0, 0) . +(2.7) +– 5 – + +The action on a generic field of Carrollian rotation, space- and time-translation is given +by +[Jij, Φ(0, 0)] = ΣijΦ(0, 0), +[H, Φ(t, xi)] = ∂tΦ(t, xi), +[Pi, Φ(t, xi)] = ∂iΦ(t, xi) . +(2.8) +The Carrollian conformal primaries are defined as +[Ki, Φ(0, 0)] = 0, [K, Φ(0, 0)] = 0, +[Mf, Φ(0, 0)] = 0 for polynomial degree > 1 . +(2.9) +The Carrollian boost acts on the primary non-trivially because the fields are not eigenstates +of Carrollian boosts. The transformation of a generic field under Carrollian boosts can be +written using the Jacobi identity. The action of Carroll boost on the fields is +[Bi, Φ(0, 0)] = rϕi + sσiφ + s′σiχ + aAtδji + bAi + . . . , +(2.10) +where ϕ, {φ, χ}, {At, Ak} denote the primary fields of different spins (0, 1 +2, 1). The con- +stants r, {s, s′}, {a, b} cannot be determined just from the symmetries, but can only be fixed +though dynamics. One way to determine them is the limit c → 0 of the dynamics of the +corresponding relativistic theory. The above action of the Carroll boost can be generalized +for any spin theory. +We use the conventional way to define a primary field Φ(t, xi) for the CCA at any spacetime +point from the origin as +Φ(t, x) = UΦ(0, 0)U −1, +where U = e−tH−xiPi. +(2.11) +The action of all the generators of the finite and infinite CCA on this generic Carrollian +primary Φ(t, xi) can be written as +[Jij, Φ(t, xi)] = (xi∂j − xj∂i)Φ(t, x) + ΣijΦ(t, xi), +(2.12a) +[Bj, Φ(t, xi)] = xj∂tΦ(t, x) − U[Bj, Φ(0, 0)]U −1, +(2.12b) +[D, Φ(t, xi)] = (t∂t + xi∂i + ∆)Φ(t, xi), +(2.12c) +[Kj, Φ(t, xi)] = (2∆xj + 2xjt∂t + 2xixj∂i − 2xiΣij − xixi∂j) Φ(t, x) +− 2t U[Bj, Φ(0, 0)]U −1, +(2.12d) +[Mf, Φ(t, x)] = f(xi)∂tΦ(t, x) + ∂jf U[Bj, Φ(0, 0)]U −1. +(2.12e) +This is a summary of CCA and its representation, which we have used extensively in the +following sections. We will see for our example what are the constants used in equation +(2.10). +3 +Yang-Mills action and small c-expansion +The Yang-Mills theory in (d + 1)-dimensions is described by the action +SY M = +� +dd+1x LY M = +� +dd+1x +� +− 1 +4F µνaF a +µν +� +, +(3.1) +– 6 – + +and the equations of motion +∂µF µνa + gfabcAb +µF µ˜νc = 0, +(3.2) +where a = 1, 2, ..., N2 − 1. +The non-abelian field strength (F a +µν) is defined as F a +µν = +∂µAa +ν − ∂νAa +µ + gfabcAb +µAc +ν. Here, Aa +µ is the gauge field and fabc are structure constants of +the underlying gauge group. +If we write the above action making the speed of light c explicit, the resulting action +is 1 +S = +� +dd+1x +� ++ 1 +2c2 F a +tiF a +ti − 1 +4F ijaF a +ij +� +. +(3.3) +The action is divided into two parts. +The first part contains the temporal component +of the gauge field (Aa +t ) along with the spatial components (Aa +i ). The second part is just +dependent on the spatial components (Aa +i ) of the gauge field. To proceed with the small +c-expansion, we write the gauge fields as an expansion in c as +Aa +t = +∞ +� +n=0 +cλc2naa(n) +t +, +Aa +i = +∞ +� +n=0 +cλc2naa(n) +i +. +(3.4) +Using these expansions, the first part of the Lagrangian is +1 +2c2 F a +tiF a +ti = 1 +2 +� +c2λ−2 +∞ +� +n,m=0 +(∂taa(n) +i +− ∂iaa(n) +t +)(∂taa(m) +i +− ∂iaa(m) +t +) + c3λ−2 +∞ +� +n,m,l=0 +2gfabc +(∂taa(n) +i +− ∂iaa(n) +t +)ab(m) +t +ac(l) +i ++ c4λ−2 +∞ +� +n,m,l,p=0 +g2fabcfadeab(n) +t +ac(m) +i +ad(l) +t +ae(p) +i +� +.(3.5) +If we look at only the first term above, i.e. 
for the abelian case, cλ becomes an overall +factor and there will be only one result for different λ [42]. However, because of the self- +interaction terms of the gauge fields in the second and third terms, we can not take out cλ +as an overall factor. This leads to distinct sectors of Carroll invariant non-abelian gauge +theories corresponding to λ = 0 and λ ̸= 0. Similarly, the second part (fully spatial part) +of the Lagrangian is +−1 +4F a +ijF a +ij = −1 +4 +� +c2λ +∞ +� +n,m=0 +(∂iaa(n) +j +− ∂jaa(n) +i +)(∂iaa(m) +j +− ∂jaa(m) +i +) + c3λ +∞ +� +n,m,l=0 +2gfabc(∂iaa(n) +j +−∂jaa(n) +i +)ab(m) +i +ac(l) +j ++ c4λ +∞ +� +n,m,l,p=0 +g2fabcfadeab(n) +i +ac(m) +j +ad(l) +i +ae(p) +j +� +. +(3.6) +We generally expand Lagrangian in even powers of c. If the relativistic action did not +contain any self-interaction term, there would not have been any problem. We could have +just taken cλ outside and written the Lagrangian in even power of c. But in our case, to +1x0 = ct so ∂0 = 1 +c ∂t and A0 = 1 +c At +– 7 – + +write the expansions in Eq.(3.5) and Eq.(3.6) in even powers of c, we have to choose λ as +an even integer. We thus define λ = 2δ. Then the two parts of the action become +1 +2c2 F a +tiF a +ti = 1 +2 +� +c4δ−2 +∞ +� +n,m=0 +() + c6δ−2 +∞ +� +n,m,l=0 +() + c8δ−2 +∞ +� +n,m,l,p=0 +() +� +, +(3.7a) +−1 +4F a +ijF a +ij = −1 +4 +� +c4δ +∞ +� +n,m=0 +() + c6δ +∞ +� +n,m,l=0 +() + c8δ +∞ +� +n,m,l,p=0 +() +� +, +(3.7b) +where () is a shorthand for the corresponding terms in Eq.(3.5) and Eq.(3.6). Now every +term looks good. As argued earlier, λ = 0 and λ ̸= 0 (correspondingly δ = 0 and δ ̸= 0) +give two distinct sectors. For δ = 0, the resultant Carrollian actions describe non-abelian +theories, i.e. these include the self-interaction terms, whereas for δ ̸= 0, the resultant +Carrollian actions describe copies of the Carrollian abelian theory. +For δ = 0, the leading order Lagrangian, i.e. the coefficient of c−2 in (3.7) is +L(0) = 1 +2 +� +(∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +) + 2gfabc(∂taa(0) +i +− ∂iaa(0) +t +)ab(0) +t +ac(0) +i ++g2fabcfadeab(0) +t +ac(0) +i +ad(0) +t +ae(0) +i +� +, (3.8) +and this is called the electric sector. The next-to-leading order (NLO) Lagrangian (i.e. the +coefficient of c0 in (3.7)), which is called the magnetic sector, is given by +L(1) = +� +∂taa(1) +i +− ∂iaa(1) +t +� +Ea(0) +i ++ gfabc� +∂taa(0) +i +− ∂iaa(0) +t +�� +ab(0) +t +ac(1) +i ++ ab(1) +t +ac(0) +i +� ++g2 +2 fabcfade� +ab(1) +t +ac(0) +i +ad(0) +t +ae(0) +i ++ ab(0) +t +ac(1) +i +ad(0) +t +ae(0) +i ++ ab(0) +t +ac(0) +i +ad(1) +t +ae(0) +i ++ab(0) +t +ac(0) +i +ad(0) +t +ae(1) +i +� +− 1 +4fija(0)fa(0) +ij +, +(3.9) +where Ea(0) +i += ∂taa(0) +i +−∂iaa(0) +t ++gfabcaa(0) +t +aa(0) +i +and fa(0) +ij += ∂iaa(0) +j +−∂jaa(0) +i ++gfabcaa(0) +i +aa(0) +j +. +For δ ̸= 0, all values of δ are equivalent, and thus we take δ = 1 for simplicity. For δ = 1, +we get that the total Lagrangian in (3.7) has an expansion: +L = c2 ˜L0 + c4 ˜L1 + ..., +where the leading order Lagrangian (coefficient of c2) is +˜L(0) = 1 +2(∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +), +(3.10) +and the next-to-leading order Lagrangian (coefficient of c4) is +L(1) = ˜Ea(1) +i +˜Ea(0) +i ++ gfabc ˜Ea(0) +i +ab(0) +t +ac(0) +i +− 1 +4 +˜fa(0) +ij +˜fa(0) +ij +. +(3.11) +Here ˜Ea(0) +i += (∂taa(0) +i +− ∂iaa(0) +t +), ˜Ea(1) +i += (∂taa(1) +i +− ∂iaa(1) +t +) and ˜fa(0) +ij += (∂iaa(0) +j +− ∂jaa(0) +i +). +Thus, taking λ (i.e. 
δ) to be zero or non-zero, we have obtained four Lagrangians: two of +these are the so-called electric sector, and the other two are the so-called magnetic sector. +In the following sections, we will give details of all the four sectors. +– 8 – + +4 +Carrollian Yang-Mills actions +4.1 +Electric Action I +If we take δ = 1 in equation (3.7), we can see the Leading order Lagrangian (coefficient of +c2) is given by +˜L(0) = 1 +2(∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +) = 1 +2 +˜Ea(0) +i +˜Ea(0) +i +, +(4.1) +where ˜Ea(0) +i += (∂taa(0) +i +− ∂iaa(0) +t +). Unlike the δ = 0 case that we will study in the next sub- +section where the electric sector contains self-interaction, this just contains kinetic terms. +The corresponding equations of motion are +∂i∂taa(0) +i +− ∂i∂iaa(0) +t += ∂i ˜Ea +i = 0, +∂t∂taa(0) +i +− ∂t∂iaa(0) +t += ∂t ˜Ea +i = 0. +(4.2) +The action and the equations of motion are copies of the electric sector of Carrollian abelian +theory discussed in [42], where Carrollian symmetry (boost and rotation) is analyzed. +Below, we will see the action’s full infinite Carrollian conformal invariance. Boost and +rotation invariance in our language are presented in Appendix A. +Gauge symmetry +The action here is just copies of the abelian action, so the gauge symmetry is like the +abelian theory. The transformations are given by +aa(0) +t +→ a +′a(0) +t += aa(0) +t ++ ∂tαa, +aa(0) +i +→ a +′a(0) +i += aa(0) +i ++ ∂iαa. +(4.3) +The action is invariant under the above gauge transformation, which are copies of the +abelian gauge transformation. +Spacetime symmetries +In the previous section, we talked about the gauge symmetry of the action. We will use +the action of CCA to find the symmetries of the action Eq.(4.1). In the representation +theory sec.2, we have some undefined constants. The value of these constants depends on +the fields of the theory under consideration. For example, the value of scaling dimension +(∆) for fields will be fixed when we impose dilatation invariance of the action. Similarly, +all other constants will be fixed when we impose other symmetries of the action. All the +four sectors of Lagrangian of the Carrollian Yang-Mills contain four sets of constants. Now +let’s discuss the first action we have stated above. +The action is trivially invariant under time and space translations (H, Pi). The in- +variance of the action under rotation (Jij), boost (Bi) are shown in Appendix A. Here +we will only show the invariance under dilatation (D), spatial special conformal trans- +formation (Ki), and supertranslation (Mf). We know that for different values of f the +supertranslation (Mf) operator contains Bi and K. +– 9 – + +Dilatation: +Using the action of dilatation operator described in (2.12), we write the +transformations of fields under the dilatation operator. The transformations of aa(0) +t +and +aa(0) +i +under dilatation is +δDaa(0) +t += (t∂t + xk∂k + ∆1)aa(0) +t +, +δDaa(0) +i += (t∂t + xk∂k + ∆2)aa(0) +i +. +(4.4) +Using these transformations in action (4.1), we can see the action changes as +δDL = ∂t +� +t ˜Ea(0) +i +˜Ea(0) +i +� ++ ∂k +� +xk ˜Ea(0) +i +˜Ea(0) +i +� +if +∆1 = ∆2 = 1. +(4.5) +So the action is invariant under dilatation in four spacetime dimensions if the scaling +dimension is one for both the temporal and spatial components of gauge fields. +Spatial SCT: +Similar to the above case, the transformation of fields under spatial con- +formal transformation is given below. 
+Here we take the transformation with arbitrary +constants introduced when we discussed representation theory. The transformations are +given by +δKlaa(0) +t += +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa(0) +t ++ 2tqaa(0) +l +, +(4.6a) +δKlaa(0) +i += +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa(0) +i ++ 2δlixkaa(0) +k +− 2δlkxiaa(0) +k ++ 2tq′δliaa(0) +t +. +(4.6b) +Using these transformations in action (4.1), we can see the action changes as +δKlL(0) = ∂t(xlt ˜Ea(0) +i +˜Ea(0) +i +) + ∂k +� +xkxl ˜Ea +i ˜Ea +i +� +− ∂l +�1 +2xkxk ˜Ea(0) +i +˜Ea(0) +i +� +. +(4.7) +So the action is invariant under spatial special conformal transformation if q = 0, q′ = 1. +Supertranslation: +Instead of seeing the boost and temporal conformal transformation +invariance of the action separately, we will see the supertranslation (Mf) invariance of the +action. Fields transform under this operator(Mf) as +δMf aa(0) +t += f(x)∂taa(0) +t +, +δMf aa(0) +i += f(x)∂taa(0) +i ++ aa(0) +t +∂if(x). +(4.8a) +Using these transformations in action (4.1), the action changes as +δMf L(0) = ∂t +� +f(x) ˜Ea(0) +i +˜Ea(0) +i +� +. +(4.9) +So the action is invariant under Mf. Thus we see that the action (4.1) is invariant under +full infinite CCA in four spacetime dimensions. +– 10 – + +Energy-Momentum tensor +The components of the energy-momentum tensors for the action (4.1) are given by +T t t = ˜Ea(0) +i +∂taa(0) +i +− ˜L(0), T t i = ˜Ea(0) +j +∂iaa(0) +j +, +(4.10) +T i t = − ˜Ea(0) +i +∂taa(0) +t +, T i +j = −Ea(0) +i +∂jaa(0) +t +− δi +j ˜L(0). +(4.11) +Using the improvement of energy-momentum tensor defined in [42], the improved energy- +momentum tensor in our case is +T µ ν = − +δL +δ∂µa(0)a +α +∂νa(0)a +α ++ δµ νL − +� +δµ +k∂t − δµ +t ∂k +�� +E(0)a +k +a(0)a +ν +� +, +(4.12) +whose components are +T t i = ˜Ea(0) +j +fa(0) +ij +, +T i t = 0, +T t t = 1 +2 +˜Ea(0) +j +˜Ea(0) +j +, +(4.13) +T i j = ˜Ea(0) +i +˜Ea(0) +j +− 1 +2δi +j ˜Ea(0) +k +˜Ea(0) +k +. +(4.14) +We can see the energy-momentum tensor is gauge invariant, traceless, and symmetric under +the interchange of spatial indices. The T i t component of the stress tensor is zero as required +by the Carroll symmetries. Below, we will see the conservation of energy-momentum tensor. +The relativistic Bianchi identity for Yang-Mills is given in Eq.(4.32). +Now we are +considering Carrollian Yang-Mills theory with δ = 1, and the Carrollian Bianchi identities +for this case are +∂t ˜fa +jk + ∂j ˜fa +kt + ∂k ˜fa +tj = 0, +(4.15a) +∂i ˜fa +jk + ∂j ˜fa +ki + ∂k ˜fa +ij = 0. +(4.15b) +Tilde means there are no interaction terms in the field strength, only abelian terms. These +are just copies of the Carrollian abelian Bianchi identity discussed in[42]. Using equations +of motion and Carrollian Bianchi identity we can see +∂tT t t + ∂iT i +t = 0, +using +(4.2), (4.15a), +(4.16a) +∂tT t +j + ∂iT i +j = 0, +using +(4.2), (4.15b). +(4.16b) +So the energy-momentum tensors satisfies conservation equations. We will return to this +section when discussing Noether charges and Quantum aspects. +4.2 +Electric Action II +The electric sector action Eq.(3.8), which has a non-abelian term, can be written in compact +form: +L0 = 1 +2 +� +(∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +) + 2gfabc(∂taa(0) +i +− ∂iaa(0) +t +)ab(0) +t +ac(0) +i ++g2fabcfadeab(0) +t +ac(0) +i +ad(0) +t +ae(0) +i +� += 1 +2Ea(0) +i +Ea(0) +i +, (4.17) +– 11 – + +where Ea(0) +i += ∂taa(0) +i +− ∂iaa(0) +t ++ gfabcaa(0) +t +aa(0) +i +. 
The equations of motion following from +the action are given by +∂iEa(0) +i ++ gfabcab(0) +i +Ec(0) +i += D(0) +i +Ea(0) +i += 0, +(4.18a) +∂tEa(0) +i ++ gfabcab(0) +t +Ec(0) +i += D(0) +t Ea(0) +i += 0, +(4.18b) +where DiOa = ∂iOa + gfabcab(0) +i +Oc, DtOa = ∂tOa + gfabcab(0) +t +Oc. +Gauge Symmetry +The gauge transformations under which the action (4.17) is invariant are given by +aa(0) +t +→ aa(0)′ +t += aa(0) +t ++ 1 +g∂tαa + fabcab(0) +t +αc, +(4.19a) +aa(0) +i +→ aa(0)′ +i += aa(0) +i ++ 1 +g∂iαa + fabcab(0) +i +αc. +(4.19b) +This gauge transformation is the same as parent theory, but now we cannot write it in +covariant form like relativistic theory. Because, like the non-relativistic theory, the metrics +in Carrollian theory are degenerate, and time and space are not on the same footing. +Spacetime Symmetries +Dilatation: +Using the action of dilatation operator described in Eq. (2.12), we write the +transformations of fields under the dilatation operator. The transformations of aa(0) +t +and +aa(0) +i +under dilatation is +δDaa(0) +t += (t∂t + xk∂k + ∆1)aa(0) +t +, +δDaa(0) +i += (t∂t + xk∂k + ∆2)aa(0) +i +. +(4.20) +Using these transformations in action (4.17), we can see the action changes as +δDL = ∂t +� +tEa(0) +i +Ea(0) +i +� ++ ∂k +� +xkEa(0) +i +Ea(0) +i +� +if +∆1 = ∆2 = 1 . +(4.21) +If the scaling dimensions of both fields ∆1 and ∆2 are one, then the action is dilatation +invariant in four spacetime dimensions. +Spatial SCT: +Similar to the above case, the transformation of fields under spatial con- +formal transformation is given below. Here we take transformation with arbitrary constant +introduced when we discussed representation theory. The transformations are given by +δKlaa(0) +t += +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa(0) +t ++ 2tqaa(0) +l +, +(4.22a) +δKlaa(0) +i += +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa(0) +i ++ 2δlixkaa(0) +k +− 2δlkxiaa(0) +k ++ 2tq′δliaa(0) +t +. +(4.22b) +Using these transformations in action (4.17), we can see the action changes as +δKlL(0) = ∂t(xltEa(0) +i +Ea(0) +i +) + ∂k +� +xkxlEa +i Ea +i +� +− ∂l +�1 +2xkxkEa(0) +i +Ea(0) +i +� +(4.23) +if +q = 0, q′ = 1, +(4.24) +so here, we can see that the action is spatial special conformal invariant if the constants q +and q +′, respectively, are zero and one. +– 12 – + +Supertranslation: +Instead of seeing the boost and temporal conformal transformation +invariance of the action separately, we will see the supertranslation (Mf) invariance of the +action. Fields transform under this operator(Mf) as +δMf aa(0) +t += f(x)∂taa(0) +t +, +δMf aa(0) +i += f(x)∂taa(0) +i ++ aa(0) +t +∂if(x). +(4.25a) +Using these transformations in action (4.17), the action changes as +δMf L(0) = ∂t +� +f(x)Ea(0) +i +Ea(0) +i +� +, +(4.26) +so the action is invariant under supertranslation (Mf). +So from the above analysis, we conclude that the action (4.17) is invariant under infinite +CCA in spacetime dimension four if the scaling dimensions for both aa +t and aa +i are one. +Energy-Momentum tensor +The leading order Lagrangian or so-called electric sector is infinite Carrollian conformal in- +variant in 4d spacetime. Now let’s see the energy-momentum tensor for the action Eq.(4.17) +and how we can improve it. Different components of energy-momentum tensor for action +(4.17) are given by +T t i = Ea(0) +j +∂iaa(0) +j +, +T i t = −Ea(0) +i +∂taa(0) +t +, +T t t = Ea(0) +i +∂taa(0) +i +− L(0), +(4.27) +T i j = −Ea(0) +i +∂jaa +t − δi +jL(0). 
+(4.28) +We can see these are not gauge invariant, T i j component is not symmetric, and T i t +component is not zero, so we have to improve it. Using the improved energy-momentum +tensor defined in [42], the improved energy-momentum tensor for our case is +T µ ν = − +δL +δ∂µa(0)a +α +∂νa(0)a +α ++ δµ νL − +� +δµ +k∂t − δµ +t ∂k +�� +E(0)a +k +a(0)a +ν +� +, +(4.29) +and the components of improved E-M tensor are +T t i = Ea(0) +j +fa(0) +ij +, +T i t = 0, +T t t = 1 +2Ea(0) +j +Ea(0) +j +, +(4.30) +T i j = Ea(0) +i +Ea(0) +j +− 1 +2δi +jEa(0) +k +Ea(0) +k +. +(4.31) +We can see the energy-momentum tensor is gauge invariant, traceless, and symmetric under +the interchange of spatial indices. The T i t component of the stress tensor is zero as required +by Carroll symmetries. Below, we will see the conservation of energy-momentum tensor. +Before going to the conservation of energy-momentum tensor, let’s see Bianchi’s iden- +tity in the Carrollian limit. The relativistic Bianchi identity for the Yang-Mills is given +by +DνF a +βµ + DβF a +µν + DµF a +νβ = 0. +(4.32) +When discussing the expansion of action, we discussed the different values of λ. For different +λ, we will also have two distinct Bianchi identities. For λ = 0 or (δ = 0), we have Bianchi +– 13 – + +identity with a non-trivial self-interaction term. And for λ = 2 or (δ = 1), we will have +Bianchi identity copies of Carrollian abelian Bianchi identity. Which we have mentioned +in the previous sector. For λ = 0(δ = 0), Carrollian Bianchi identity is +Dtfa +jk + Djfa +kt + Dkfa +tj = 0, +(4.33a) +Difa +jk + Djfa +ki + Dkfa +ij = 0. +(4.33b) +Using equations of motion and above Carrollian Bianchi identity (4.33), we can see the +conservation of energy-momentum tensor as +∂tT t t + ∂iT i t = 0, +using +(4.18b), +(4.34) +∂tT t j + ∂iT i j = 0, +using +(4.18a), (4.33a). +(4.35) +This is our detailed discussion on the electric sector with non-abelian terms. In the next +section, we will focus on the magnetic sector. In the electric sector, we have seen that +the temporal components of field strength are dominated; in the subsequent section in the +magnetic sector, we will see the purely spatial sector of field strength will dominate, and +the temporal component will behave as a constraint. We will visit this electric sector again +when we discuss Noether charges and quantum aspects of the theory. +4.3 +Magnetic Action I +For δ = 1 case, the next to leading order(NLO) Lagrangian or the so called magnetic sector +is given in Eq.(3.11). For convenience, let’s write the action again here +L(1) = ˜Ea(1) +i +˜Ea(0) +i ++ gfabc ˜Ea(0) +i +ab(0) +t +ac(0) +i +− 1 +4 +˜fa(0) +ij +˜fa(0) +ij +. +(4.36) +If we take the variation of the Lagrangian with respect to next to leading order fields +aa(1) +t +, aa(1) +i +we will get Eq.(4.2), the leading order equation of motion. If we take variation +of the action w.r.t aa(0) +t +, aa(0) +i +(the leading order fields) the equations of motion are +∂i ˜Ea(1) +i ++ gfabc∂i +� +ab(0) +t +ac(0) +i +� ++ gfabcab(0) +i +˜Ec(0) +i += 0, +(4.37a) +∂t ˜Ea(1) +i ++ gfabc∂t +� +ab(0) +t +ac(0) +i +� ++ gfabcab(0) +t +˜Ec(0) +i +− ∂k ˜fa(0) +ki += 0. +(4.37b) +The action Eq.(4.36) and the above equations of motion are not Carroll invariant. +To +make these Carroll invariant, we have to impose constraints ˜Ea(0) +i += 0 in action (4.36). +The corresponding equation of motion is +∂k ˜fa(0) +ki += 0. 
+(4.38) +Similar to δ = 0 case, we will derive the Carrollian invariant magnetic sector for the +δ = 1 case using the Lagrange multiplier in the parent action. We can start from relativistic +Lagrangian with Lagrange multiplier ξa +i , +L = −c2 +2 ξa +i ξa +i + ξa +i F a +0i − 1 +4F a +ijF a +ij. +(4.39) +– 14 – + +From equation Eq.(3.4), we can see that for δ = 1 case, before doing the expansion, every +field gets scaled by c2. If we scale every field by c2 of the above equation and collect the +c4 term (for δ = 1 case, NLO action is of the order of c4 term of expansion (3.7)), the +resultant Lagrangian is +˜LNLO = ξa +i ˜Ea +i − 1 +4 +˜fa +ij ˜fa +ij. +(4.40) +If we vary the Lagrangian with respect to ξa +i , we will get ˜Ea +i = 0 constraint. So all the +equations of motion of the Lagrangian are +˜Ea +i = 0, +∂iξa +i = 0, +∂tξa +i − ∂j ˜fa +ji = 0. +(4.41) +We will see its gauge symmetry and full spacetime symmetry below. +Gauge symmetry +The gauge symmetry of the action is not non-abelian. It reduces to copies of abelian or +u(1) symmetry. The action is invariant under the gauge transformation +aa +t → a +′a +t = aa +t + ∂tαa, +aa +i → a +′a +i = aa +i + ∂iαa, +ξa +i → ξ +′a +i = ξa +i . +(4.42) +So the action is symmetric under n2 − 1 copies of abelian symmetry. The Lagrange multi- +plier ξa +i behaves as a scalar under gauge transformation. +Spacetime symmetries +The action (4.40) is copies of the magnetic sector of Carrollian abelian theory discussed in +[42]. Carroll symmetry of the action is analyzed in that paper. Here we will see Carrollian +conformal invariance of the action. The transformations of different fields are the same as +magnetic sector fields of δ = 0. In this section, we only give how action changes under +dilatation, spatial SCT, and supertranslation. If readers want to see rotation and boost +invariance of action, they can check the appendix. +Dilatation: +Transformations of gauge fields(aa +t , aa +i ) and Lagrange multiplier(ξa +i ) under +the dilatation operator(D) are given by +δDaa +t = (t∂t + xk∂k + ∆1)aa +t , +δDaa +i = (t∂t + xk∂k + ∆2)aa +i , +(4.43a) +δDξa +i = (t∂t + xk∂k + ∆ξ)ξa +i . +(4.43b) +Using this transformation in (4.40), the change of action as +δDL = ∂t +� +t ˜Ea +i ˜Ea +i +� ++ ∂k +� +xk ˜Ea +i ˜Ea +i +� ++ ∂t +� +− 1 +4 +˜fa +ij ˜fa +ij +� ++ ∂k +� +− 1 +4 +˜fa +ij ˜fa +ij +� +if +∆1 = ∆2 = 1 +and +∆ξ = 2, +(4.44) +so the action is invariant under dilatation transformation in four spacetime dimensions if +the scaling dimensions of the temporal and spatial component of gauge fields are one, and +for ξi scaling dimension is two. +– 15 – + +Spatial SCT: +Transformation of fields aa +t , aa +i and ξa +i under spatial SCT are given by +δKlaa +t = +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa +t + 2tqaa +l , +(4.45a) +δKlaa +i = +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa +i + 2δlixkaa +k − 2δlkxiaa +k + 2tq′δliaa +t , (4.45b) +δKlξa +i = +� +4xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +ξa +i + 2δlixkξa +k − 2δlkxiξa +k + 2tq′′δliaa +t ++2q′′′t ˜fa +il, (4.45c) +using these transformations in (4.40), the action changes as +δKlL(0) = ∂t(2xltξa +i ˜Ea(0) +i +) − ∂t +�1 +2txl ˜fa +ij ˜fa +ij +� ++ ∂k +� +2xkxlξa +i ˜Ea +i +� +− ∂k +�1 +2xkxl ˜fa +ij ˜fa +ij +� +−∂l +� +xkxkξa +i ˜Ea(0) +i +� ++ ∂l +�1 +4xkxk ˜fa +ij ˜fa +ij +� +, if q = 0, q′ = 1, q′′ = 0, q′′′ = −1. +(4.46) +So the action is invariant under spatial conformal transformation. 
+Supertranslation: +Lastly, invariance under supertranslation (Mf), which contains Hamil- +tonian, temporal spacial conformal, and boost operator for different choice of f. Under +this operator, fields transform as +δMf aa +t = f(x)∂taa +t , +δMf aa +i = f(x)∂taa +i − aa +t ∂if(x), +δMf ξa +i = f(x)∂tξa +i − fa +ik∂kf(x),(4.47) +using these in (4.40), the actions changes as +δMf L(0) = ∂t +� +f(x)ξa +i ˜Ea(0) +i +− 1 +4 +˜fa +ij ˜fa +ij +� ++ ∂i +� +− 1 +2 +˜fa +ij ˜Ea +j +� ++ ∂j +�1 +2 +˜fa +ij ˜Ea +i +� +. +(4.48) +So the action is invariant under Mf. The magnetic sector with δ = 1 case is invariant +under infinite CCA in 4d spacetime. +Energy-Momentum tensor +Now we will see the energy-momentum tensor and its conservation. If we derive the energy- +momentum tensor from (4.40) we will get +T t i = ξa +k∂iaa +k, +T i t = −xa +i ∂taa +t − ˜fa +ik∂taa +k +(4.49) +T t t = ξa +i ∂taa +k − L, +T i j = −ξa +i ∂jaa +t − ˜fa +ik∂jaa +k − δi +jL. +(4.50) +Here we also need an improved energy-momentum tensor as the electric sector. Following +[42], the improved energy-momentum tensor formula for the magnetic sector is +T µν = − +δL +δ∂µaaα +∂νaa +α + δµνL − δµt∂i [ξa +i aa +ν] + δµi +� +∂t(ξa +i aa +ν) + ∂j( ˜fa +ijaa +ν) +� +. +(4.51) +If we write it components wise explicitly +T t i = ξa +k ˜fa +ik, +T i t = 0, +T t t = 1 +4 +˜fa +ij ˜fa +ij, +T i j = − ˜fa +ik ˜fa +jk − δi +jL. +(4.52) +– 16 – + +Here we can see energy-momentum tensor is gauge invariant, traceless, symmetric in spatial +indices, and T i +t = 0 as expected for Carroll theory. Using equations of motion and Carrollian +Bianchi identity, we can see +∂tT t t + ∂iT i t = 0, +using +(4.41), (4.33a). +(4.53a) +∂tT t j + ∂iT i j = 0, +using +(4.41), (4.33b). +(4.53b) +the energy-momentum tensor satisfies conservation equations. +4.4 +Magnetic Action II +In this section, we will study details of the NLO Lagrangian or so-called magnetic sector. +The NLO Lagrangian contains leading order and NLO fields. From the expansion of action +section, we have the NLO Lagrangian (coefficient of c0) in Eq.(3.9). +The action looks +horrible to analyze. Thanks to Jacobi’s identity, +fbcafdae + fdbafcae + fcdafbae = 0, +(4.54) +using this, we can simplify the action. Using the above identity and doing some calculations, +we can write the NLO Lagrangian in this form +L(1) = +� +D(0) +t aa(1) +i +� +Ea(0) +i +− +� +D(0) +i +aa(1) +t +� +Ea(0) +i +− 1 +4fija(0)fa(0) +ij +. +(4.55) +If we take the variation of the Lagrangian with respect to next to leading order fields +aa(1) +t +, aa(1) +i +we will get Eq.(4.18), leading order equations of motion as a property of this +formalism. If we take variation with respect to leading order fields (aa(0) +t +, aa(0) +i +), equations +of motion are +D(0) +i +D(0) +i +aa(1) +t +− D(0) +i +D(0) +t aa(1) +i +− gfabcab(1) +i +Ec(0) +i += 0, +(4.56a) +D(0) +t D(0) +t aa(1) +i +− D(0) +t D(0) +i +aa(1) +t +− gfabcab(1) +t +Ec(0) +i +− D(0) +k fa(0) +ki += 0, +(4.56b) +where D(0) +k fa(0) +ki += ∂kfa(0) +ki ++ gfabcab(0) +k +fc(0) +ki . Although the action and the equations of mo- +tion look nice in compact form, these are not Carroll invariant. To make Carroll invariant, +we have to take the constraint Ea(0) +i += 0 at the level of action Eq.(4.55). Then action will +become − 1 +4fija(0)fa(0) +ij +and equations of motion will be D(0) +k fa(0) +ki += 0. 
+We can derive the Carroll invariant magnetic sector from the Relativistic Yang-Mills +action if we consider a Lagrange multiplier in relativistic Lagrangian and then take speed +of light to zero limit. The relativistic Lagrangian with Lagrange multiplier ξa +i and explicit +c factor is given by +L = −c2 +2 ξa +i ξa +i + ξa +i F a +0i − 1 +4F a +ijF a +ij. +(4.57) +From here, we can get back to the usual Yang-Mills action if we integrate out ξi fields. +Now we can see if we take the small c limit here, we will get +LNLO = ξa +i (∂taa(0) +i +− ∂iaa(0) +t +) − 1 +4(∂iaa +j − ∂jaa +i )(∂iaa +j − ∂jaa +i ) + gfabcab +tac +iξa +i +−gfabcab +iac +j∂iaa +j − 1 +4g2fabcfadeab +iac +jad +i ae +j = ξa +i Ea +i − 1 +4fa +ijfa +ij. (4.58) +– 17 – + +The Lagrangian contains non-trivial self-interaction terms or non-abelian terms. The equa- +tions of motion of this action are +Ea +i = 0, +Diξa +i = 0, +Dtξi − Djfji = 0. +(4.59) +Here we are getting the constraints Ea(0) +i += 0 as an equations of motion for the Lagrange(ξa +i ). +Below we will see the full spacetime symmetry of this action. +Gauge symmetry +Before seeing the spacetime symmetry of the action, it will be good to check the gauge +symmetry. The action Eq.(4.58) is invariant under the gauge transformation +aa +t → a +′a +t = aa +t + 1 +g∂tαa + fabcab +tαc, +(4.60a) +aa +i → a +′a +i = aa +i + 1 +g∂iαa + fabcab +iαc, +(4.60b) +ξa +i → ξ +′a +i = ξa +i + fabcξb +i αc. +(4.60c) +The temporal and spatial component of the gauge field is transformed in the same way +as the electric sector. The Lagrange multiplier ξa +i transforms as a scalar in the adjoint +representation of the underlying gauge group. +Spacetime symmetries +Similar to the electric sector discussed above, we will see the symmetry of the action +under dilatation (D), spatial SCT (Ki), and supertranslation Mf. The Rotation and boost +invariance is shown in the appendix. +Dilatation: +Transformations of gauge fields(aa +t , aa +i ) and Lagrange multiplier(ξa +i ) under +the dilatation operator(D) are given by +δDaa +t = (t∂t + xk∂k + ∆1)aa +t , +δDaa +i = (t∂t + xk∂k + ∆2)aa +i , +(4.61a) +δDξa +i = (t∂t + xk∂k + ∆ξ)ξa +i , +(4.61b) +using this transformation in (4.58), the change of action as +δDL = ∂t +� +tEa +i Ea +i +� ++ ∂k +� +xkEa +i Ea +i +� ++ ∂t +� +− 1 +4fa +ijfa +ij +� ++ ∂k +� +− 1 +4fa +ijfa +ij +� +(4.62) +if +∆1 = ∆2 = 1, +∆ξ = 2. +(4.63) +So the action is invariant under dilatation in four spacetime dimensions if the scaling +dimensions of the temporal and spatial component of gauge fields are one, and for ξi +scaling dimension is two. +– 18 – + +Spatial SCT: +Transformation of fields aa +t , aa +i and ξa +i under spatial SCT are given by +δKlaa +t = +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa +t + 2tqaa +l , +(4.64a) +δKlaa +i = +� +2xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +aa +i + 2δlixkaa +k − 2δlkxiaa +k + 2tq′δliaa +t , (4.64b) +δKlξa +i = +� +4xl + 2xlt∂t + 2xkxl∂k − xkxk∂l +� +ξa +i + 2δlixkξa +k − 2δlkxiξa +k + 2tq′′δliaa +t ++2q′′′tfa +il, (4.64c) +using these transformations in (4.58), the action changes as +δKlL(0) = ∂t(2xltξa +i Ea(0) +i +) − ∂t +�1 +2txlfa +ijfa +ij +� ++ ∂k +� +2xkxlξa +i Ea +i +� +− ∂k +�1 +2xkxlfa +ijfa +ij +� +−∂l +� +xkxkξa +i Ea(0) +i +� ++ ∂l +�1 +4xkxkfa +ijfa +ij +� +. (4.65) +So the action is invariant under spatial special conformal transformation if q = 0, q′ = +1, q′′ = 0, q′′′ = −1. 
+Supertranslation: +Lastly, invariance under supertranslation (Mf), which contains Hamil- +tonian, temporal spacial conformal, and boost operator for different choice of f. Under +this operator, fields transform as +δMf aa +t = f(x)∂taa +t , +δMf aa +i = f(x)∂taa +i − aa +t ∂if(x), +δMf ξa +i = f(x)∂tξa +i − fa +ik∂kf(x),(4.66) +using these in (4.58), the actions changes as +δMf L(0) = ∂t +� +f(x)ξa +i Ea(0) +i +− 1 +4fa +ijfa +ij +� ++ ∂i +� +− 1 +2fa +ijEa +j +� ++ ∂j +�1 +2fa +ijEa +i +� +. +(4.67) +The action is invariant under Mf. +Now we conclude the NLO Lagrangian or the magnetic sector action for the λ = 0 case +Eq.(4.58) is invariant under full infinite CCA symmetry in four spacetime dimensions. +Energy-Momentum tensor +Like the electric sector, the NLO Lagrangian or magnetic sector is infinite Carrollian con- +formal invariant in 4d spacetime. Now let’s see what the energy-momentum tensor for the +action Eq.(4.58) is and see how we can improve it. Energy-momentum tensor of the action +(4.58) is given by +T t i = ξa +k∂iaa +k, +T i t = −xa +i ∂taa +t − fa +ik∂taa +k, +(4.68) +T t t = ξa +i ∂taa +k − L, +T i j = −ξa +i ∂jaa +t − fa +ik∂jaa +k − δi +jL. +(4.69) +Here we also need an improved energy-momentum tensor as the electric sector. Following +[42], the improved energy-momentum tensor formula for the magnetic sector is +T µν = − +δL +δ∂µaaα +∂νaa +α + δµνL − δµt∂i [ξa +i aa +ν] + δµi +� +∂t(ξa +i aa +ν) + ∂j(fa +ijaa +ν) +� +. +(4.70) +If we write it components wise explicitly +T t i = ξa +kfa +ik, +T i t = 0, +T t t = 1 +4fa +ijfa +ij, +T i j = −fa +ikfa +jk − δi +jL. +(4.71) +– 19 – + +Here we can see energy-momentum tensor is gauge invariant, traceless, symmetric in spatial +indices, and T i +t = 0 as expected for Carroll theory. Using equations of motion and Carrollian +Bianchi identity, we can see +∂tT t t + ∂iT i t = 0, +using +(4.59), (4.33a), +(4.72a) +∂tT t j + ∂iT i j = 0, +using +(4.59), (4.33b). +(4.72b) +the energy-momentum tensor satisfies conservation equations. +5 +Noether charges and Charge algebra +All the four sectors of Carrollian Yang-Mills theory are invariant under infinite Carrollian +conformal symmetry. In this section, we will study Noether’s charges and charge algebra +and see if there is any central extension for any commutation relation. +If we vary the Lagrangian (L = +� +dd−1x L) on-shell on the field space in an arbitrary +direction: ϕ → ϕ + δϕ, we have +δL = +� +dd−1x +� +∂tΘ(ϕ, ∂ϕ, δϕ) +� +: on-shell. +(5.1) +Here the expression for the Θ for all four sectors of action are +δ = 0 : +Electric Sector Θ = δaa +i Ea(0) +i +. +Magnetic Sector Θ = δaa +i ξa +i . +(5.2) +δ = 1 : +Electric Sector Θ = δaa +i ˜Ea(0) +i +. +Magnetic Sector Θ = δaa +i ξa +i . +(5.3) +Next, we consider a specific infinitesimal transformation ϕ → ϕ + δϵϕ off-shell. The varia- +tion δϵ is said to be a symmetry, if: +δϵL = +� +dd−1x +� +∂tβ(ϕ, ∂ϕ, δϵϕ) +� +: off-shell, +(5.4) +for some function β in field space. If we compare (5.1) and (5.4), we deduce that on-shell: +∂tQϵ := +� +dd−1x∂t (Θ(Φ, ∂Φ, δϵΦ) − β(Φ, ∂Φ, δϵΦ)) = 0. +(5.5) +Noether Charge is Q = +� +dd−1x +� +Θ − β +� +. 
Charges are listed below for different cases +Electic(δ = 0) +QBoost = +� +d3x +� +xk∂taa(0) +i +Ea(0) +i ++ aa(0) +t +Ea(0) +i +− xk +2 Ea(0) +i +Ea(0) +i +� +, +(5.6a) +QDilation = +� +d3x +� +t∂taa(0) +i +Ea(0) +i ++ xk∂kaa(0) +i +Ea(0) +i ++ a0 +i Ea(0) +i +− tEa(0) +i +Ea(0) +i +� +, +(5.6b) +QSpatial SCT = +� +d3x +� +2xkt∂taa(0) +i +Ea(0) +i ++ 2xlxk∂laa(0) +i +− xlxl∂kaa(0) +i +Ea(0) +i ++ 2xkaa(0) +i +Ea(0) +i ++2xlaa(0) +l +Ea(0) +k +− 2xiaa(0) +k +Ea(0) +i +− txkEa(0) +i +Ea(0) +i +� +, +(5.6c) +QMf = +� +d3x +� +f(x)∂taa(0) +i +Ea(0) +i +− aa(0) +t +∂if(x)Ea(0) +i +− f(x)Ea(0) +i +Ea(0) +i +� +. +(5.6d) +– 20 – + +Magnetic(δ = 0) +QBoost = +� +d3x +� +xk∂taa +i ξa +i − aa +t ξa +i − xkξa +i Ea +i + xk +1 +4F a +ijF a +ij +� +, +(5.7a) +QDilation = +� +d3x +� +t∂taa +i ξa +i + xk∂kaa +i ξa +i + aa +i ξa +i − tξa +i Ea +i + 1 +4tfa +ijfa +ij +� +, +(5.7b) +QSpatial SCT = +� +d3x +� +2xkt∂taa +i ξa +i + 2xlxk∂laa +i ξa +i − xlxl∂kaa +i ξa(0) +i ++ 2xkaa +i ξa +i ++2xlaa +l ξa(0) +k +− 2xiaa +kξa +i + 2tξa +kaa +t − 2txkEa +i ξa +i − 1 +2txkfa +ijfa +ij +� +, +(5.7c) +QMf = +� +d3x +� +f(x)∂taa(0) +i +ξa +i + aa(0) +t +∂if(x)ξa +i − f(x)ξa +i Ea(0) +i +− 1 +4fa +ijfa +ij +� +. +(5.7d) +Similarly, for the electric and the magnetic sector of the δ = 1 case, we can write Noether’s +charge using Eq.(5.3). Expression of charges for δ = 1 case are similar to δ = 0 case, but +instead of Ea +i and fa +ij we have to write in term of ˜Ea +i and ˜fa +ij respectively. +If we check the charge algebra using these charges, there is no central extension for +any commutation. In the Galilean Yang-Mills case, there is a non-trivial state-dependent +central charge in the charge algebra [67]. +Here we will give just one example for the +electric sector, and all the other commutation relations can be realized similarly. Using the +expression of Θ, we can define the Poisson bracket for the electric sector as +Ω(δ1, δ2) = δ1Θ(δ2) − δ2Θ(δ1) = δ1aa(0) +i +δ2Ea(0) +i +− δ2aa(0) +i +δ1Ea(0) +i +. +(5.8) +Now if we check the algebra between dilatation (D) and supertranslation (Mf) using trans- +formations of different fields in the electric sector, we can see that +Ω(D, Mf) = QMh, +where +h = xk∂kf(x) − f(x). +(5.9) +Here we can see that the commutation relation between dilatation and supertranslation is +satisfied and there is no central charge. Similarly, we can realize all other commutation +relations of infinite CCA for all the four sectors. A detailed discussion of the charge algebra +is given in Appendix B. +6 +Propagator and Vertices +We discussed the construction of Carrollian Yang-Mills actions, symmetry of all four sec- +tors, energy-momentum tensor and its conservation, Noether charges, and charge algebra. +Now we will start the Quantum aspects of the theory. +Details discussion on quantum +properties will be in our subsequent work; here will give all the propagators and vertices +for all four sectors. +For δ = 1 sector will discuss first. In this case, Lagrangian contained only kinetic +terms. +So there will be only propagators. +After that, we will discuss the propagator, +vertices, and Feynman diagram for the δ = 0 cases. +– 21 – + +6.1 +Electric Sector I +For the δ = 1 case, similar to the relativistic case, we cannot calculate the propagator +without adding a gauge fixing term. The full electric sector(δ = 1) Lagrangian with a +gauge fixing term is +L = 1 +2(∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +) − 1 +2χ∂taa +t ∂taa +t , +(6.1) +where χ is gauge parameter. 
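Below, the propagator is obtained by passing to momentum space and inverting the kinetic matrix, Eq.(6.5); the quoted inverse, Eq.(6.6), can be cross-checked symbolically. The following sketch is only such a consistency check, with the overall factor i delta^ab of d_IJ and -i delta^ab of the propagator stripped off, so that the product of the two 4x4 matrices (ordered as a_t, a_1, a_2, a_3) should be the identity.

import sympy as sp

w, chi = sp.symbols('omega chi', nonzero=True)
k1, k2, k3 = sp.symbols('k1 k2 k3', real=True)
k = [k1, k2, k3]
ksq = k1**2 + k2**2 + k3**2

# kinetic matrix of Eq. (6.5), without the overall i*delta^{ab}
d = sp.zeros(4, 4)
d[0, 0] = -ksq + w**2/chi
for i in range(3):
    d[0, i + 1] = -w*k[i]
    d[i + 1, 0] = -w*k[i]
    for j in range(3):
        d[i + 1, j + 1] = -w**2*(1 if i == j else 0)

# candidate propagator of Eq. (6.6), without the overall -i*delta^{ab}
G = sp.zeros(4, 4)
G[0, 0] = chi/w**2
for i in range(3):
    G[0, i + 1] = -k[i]*chi/w**3
    G[i + 1, 0] = -k[i]*chi/w**3
    for j in range(3):
        G[i + 1, j + 1] = (k[i]*k[j]*chi - w**2*(1 if i == j else 0))/w**4

assert (d*G - sp.eye(4)).applyfunc(sp.simplify) == sp.zeros(4, 4)
print("d_IJ(k) * <A_I A_J>(k) = identity, consistent with Eqs. (6.5) and (6.6).")
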
In order to get propagators from this kinetic part of the +Lagrangian, let us first introduce Fourier transformation to momentum space +Φa(t, ⃗x) = +� dω +2π +d3⃗k +(2π)3 e−iωtei⃗k·⃗x ˜Φa(ω,⃗k), +(6.2) +where Φa = (aa +t , aa +i , ca, ¯ca, ξa +i ), and delta functions +� +dt +2πe−iωt = δ(ω), +� +d3⃗x +(2π)3 ei⃗k·⃗x = δ(3)(⃗k). +(6.3) +We also introduce the notation, k = (ω,⃗k) and Aa +I = (aa +t , aa +i ). Taking Fourier transforma- +tion and using delta functions, the action becomes +S = +� dωd3⃗k +(2π)4 +�1 +2Aa +I(k)dIJabAb +J(−k) +� +, +(6.4) +where +dIJab(k) = iδab +� +−k2 + ω2 +χ +−ωkj +−ωki +−ω2δij +� +. +(6.5) +Then from the inverse of dIJab, we get the propagators for the fields Aa +I as +⟨Aa +IAb +j⟩ = −iδab +� +χ +ω2 +− kiχ +ω3 +− kjχ +ω3 +kikjχ−ω2δij +ω4 +� +. +(6.6) +where Aa +I = aa +t , aa +i . +a +b +I +J +k +(a) Gauge field propagator ⟨Aa +IAb +J⟩ +Figure 1: Electric Propagator (δ = 1) +– 22 – + +6.2 +Magnetic sector I +Let’s consider the magnetic sector Lagrangian for δ = 1 before adding any gauge fixing +term. The action (4.40) explicitly in terms of gauge fields is +L = ξa +i (∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +) − 1 +4(∂iaa +j − ∂jaa +i )(∂iaa +j − ∂jaa +i ). +(6.7) +Because there is no interaction term in the above action, no vertices are possible; only +propagators will be there. If we write the above action in momentum space using Eq.(6.2) +and Eq.(6.3), we have +S = +� dωd3⃗k +(2π)4 +�1 +2Aa +I(k)dIJabAb +J(−k) +� +, +(6.8) +where +dIJab(k) = iδab +� +� +� +� +0 +� +3×3 +� +iki +� +3×1 +− i +� +ωδij +� +3×3 +� +iki +� +1×3 +01×1 +� +0 +� +1×3 +−i +� +ωδij +� +3×3 +� +0 +� +3×1 +� +k2δij − kikj +� +3×3 +� +� +� . +(6.9) +The determinant of this matrix is zero, so we cannot derive a propagator by doing the +inverse of this matrix. We need to add a gauge fixing term, so the full Lagrangian with +gauge fixing term is +L = ξa +i (∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +) − 1 +4(∂iaa +j − ∂jaa +i )(∂iaa +j − ∂jaa +i ) +− 1 +2χ∂iaa +i ∂jaa +j.(6.10) +Similar to equation(6.8) when we write this gauge fixed Lagrangian in momentum space +the matrix (dIJab(k)) now become +dIJab(k) = δab +� +� +� +� +0 +� +3×3 +� +iki +� +3×1 +− i +� +ωδij +� +3×3 +� +iki +� +1×3 +01×1 +� +0 +� +1×3 +−i +� +ωδij +� +3×3 +� +0 +� +3×1 +� +k2δij − (1 − 1 +χ)kikj +� +3×3 +� +� +� . +(6.11) +The determinant of this matrix is non-zero. The propagator, by doing the inverse of the +above matrix is +⟨Aa +IAb +J⟩ = δab +� +� +� +� +� k2δij−kikj +ω2 +� +3×3 +� −ikj +k2 +� +3×1 +i +� k2δij−kikj +ωk2 +� +3×3 +� −iki +k2 +� +1×3 +ω2χ +k4 +� kjωχ +k4 +� +1×3 +i +� k2δij−kikj +ωk2 +� +3×3 +� kjωχ +k4 +� +3×1 +� −kikjχ +k4 +� +3×3 +� +� +� +� . +(6.12) +where Aa +I = ξa +i , aa +t , aa +i . +This section considered Lagrangian for the δ = 1 case for the electric and magnetic +sectors. These Lagrangians only contain kinetic terms, so there are only propagators, not +vertices. +– 23 – + +a +b +I +J +k +(a) Gauge field propagator ⟨Aa +IAb +J⟩ +Figure 2: Magnetic Propagator (δ = 1) +6.3 +Electric sector II +Now focus on the δ = 0 cases for propagator and vertices containing non-abelian or self- +interaction terms. The full Lagrangian for the δ = 0 electric sector with gauge fixing term +and ghost term is +L = 1 +2Ea(0) +i +Ea(0) +i +− 1 +2χ∂taa +t ∂taa +t + +∂t¯caDtca. +(6.13) +The kinetic part of the above Lagrangian is +Lkin = 1 +2(∂taa(0) +i +− ∂iaa(0) +t +)(∂taa(0) +i +− ∂iaa(0) +t +) − 1 +2χ∂taa +t ∂taa +t + ∂t¯ca∂tca. 
+(6.14) +In momentum space using equations (6.2) and (6.3) we can write +Skin = +� dωd3⃗k +(2π)4 +�1 +2Aa +I(k)dIJabAb +J(−k) + ¯ca(k) +� +− ω2� +ca(−k) +� +, +(6.15) +The kinetic part of gauge fields and the gauge fixing term is the same as the Eq.(6.1). So +the expression of dIJab is same as (6.5). The inverse of this matrix is the propagator for +gauge fields given in equation (6.6). And the inverse of the coefficient of ¯cc in (6.25) gives +the propagator for ghost fields as +⟨¯ca(k)cb(−k)⟩ = iδab +ω2 . +(6.16) +In a compact form the propagators and Feynman diagrams of the Lagrangian are +a +b +I +J +k +≡ −iδab +� +χ +ω2 +− kiχ +ω3 +− kjχ +ω3 +kikjχ−ω2δij +ω4 +� +, +a +b +k +≡ ⟨¯ca(k)cb(−k)⟩ = iδab +ω2 . +. +(6.17a) +Interaction terms of the Lagrangian (Eq.(6.13)) are +Lint = 2gfabcab +tac +i∂tac +i − 2gfabcab +tac +i∂iaa +t + g2fabcfadeab +tac +iad +t ae +i − gfabcaa +t ∂t¯cccb. (6.18) +– 24 – + +By transforming to momentum space(6.2) and using the delta functions (6.3) definition, +we can write the three field interaction terms as +S(3) +int = +� +1 +(2π)12 +3 +� +i=1 +dωid3⃗ki (2π)4δ(ω1 + ω2 + ω3)δ(3)(⃗k1 + ⃗k2 + ⃗k3)gfabc × +� +(ω1 − ω2)ab +t(k1)ac +i(k2)aa +i (k3) + iδij(k1 − k2)iab +t(k1)ac +t(k3)aa +j(k2) +−iω2φa(k1)¯cb(k2)cc(k3) +� +, (6.19) +where +n +� +i=1 +dωid3⃗ki = dω1d3⃗k1...dωnd3⃗kn. From this expression we can write the 3-point +vertices as +V abc +3 ataiai = −gfabc(ω1 − ω2), V abc i +3 ataiai = −gfabc(k1 − k2)i, V abc +3 at¯cc = gfabcω2, (6.20) +Similarly, transforming the four field interaction terms of Sint to momentum space, we get +S(4) +int = +� +1 +(2π)16 +4 +� +i=1 +dωid3⃗ki (2π)4δ(ω1 + ω2 + ω3 + ω4)δ(3)(⃗k1 + ⃗k2 + ⃗k3 + ⃗k4) × +g2� +fabcfadeab +t(k1)ac +i(k2)ad +t (k3)ae +i(k4) +� +,(6.21) +from which we can read of the 4-point vertices +V bcde +4 ataiataj = −2ig2δij +� +fabcfade + fabefadc� +. +(6.22) +6.4 +Magnetic sector II +The full magnetic sector Lagrangian for the δ = 0 case with gauge fixing term and ghost +term is +L = ξa +i Ea +i − 1 +4fa +ijfa +ij − 1 +2χ∂iaa +i ∂jaa +j − ∂i¯caDica. +(6.23) +The kinetic part of the Lagrangian is +Lkin = ξa +i (∂taa(0) +i +− ∂iaa(0) +t +) − 1 +4(∂iaa +j − ∂jaa +i )(∂iaa +j − ∂jaa +i ) − 1 +χ∂iaa +i ∂jaa +j − ∂i¯ca∂ica.(6.24) +Using equations (6.2) and (6.3) we can write the above Lagrangian in momentum space as +Skin = +� dωd3⃗k +(2π)4 +�1 +2Aa +I(k)dIJabAb +J(−k) + ¯ca(k) +� +− ⃗k2� +ca(−k) +� +, +(6.25) +where Aa +µ = ξa +i , aa +t , aa +i . +The kinetic terms of gauge fields of this magnetic sector is the same as the Eq.(6.10), +so the matrix dIJab is same as Eq.(6.11), so as the propagator Eq.(6.12). The inverse of +the coefficient of ¯cc gives the propagator for the ghost fields as +⟨¯ca(k)cb(−k)⟩ = iδab +⃗k2 . +(6.26) +– 25 – + +a +k1 +k3 +c +k2 +at +aj +b +ai +(a) V abc +3 ataiaj +a +k1 +k3 +c +k2 +at +at +b +ai +(b) V abc i +3 atatai +b +k1 +k4 +e +k2 +ai +at +c +aj +d +at +k3 +(c) V bcde ij +3 atataiaj +b +c +a +at +k1 +k3 +k2 +(d) V abc +3 at¯cc +Figure 3: Electric Sector Feynman Diagrams +In a compact form the propagators and Feynman diagram of the Lagrangian are +a +b +I +J +k +≡ δab +� +� +� +� +� k2δij−kikj +ω2 +� +3×3 +� −ikj +k2 +� +3×1 +i +� k2δij−kikj +ωk2 +� +3×3 +� −iki +k2 +� +1×3 +ω2χ +k4 +� kjωχ +k4 +� +1×3 +i +� k2δij−kikj +ωk2 +� +3×3 +� kjωχ +k4 +� +3×1 +� −kikjχ +k4 +� +3×3 +� +� +� +� , +a +b +k +≡ ⟨¯ca(k)cb(−k)⟩ = iδab +⃗k2 . +(6.27) +Interaction terms of the Lagrangian are +Lint = gfabcab +tac +iξa +i − gfabcab +iac +j∂iaa +j − 1 +4g2fabcfadeab +iac +jad +i ae +j − gfabcab +i∂i¯cacc. 
+(6.28) +All the three point interactions in momentum space using (6.2) and (6.3) are +S(3) +int = +� +1 +(2π)12 +3 +� +i=1 +dωid3⃗ki (2π)4δ(ω1 + ω2 + ω3)δ(3)(⃗k1 + ⃗k2 + ⃗k3)gfabc × +� +iδijξa +i (k1)ab +t(k2)ac +j(k3) + + i +6 +� +(k1 − k2)iδilδjk + (k2 − k3)iδijδlk + (k3 − k1)iδikδjl� +× +aa +j(k1)ab +k(k2)ac +l (k3) + iδijk2jaa +i (k1)¯cb(k2)cc(k3) +� +,(6.29) +from where we can write the 3-point vertices as +V abc +3 ξiatai = −gfabc, +V abc +3 ai¯cc = −gfabcki +2. +V abc ijk +3 aiajak = −gfabc� +(k1 − k2)kδij + (k2 − k3)iδjk + (k3 − k1)jδik� +. +(6.30) +– 26 – + +The four point interaction terms in momentum space are +S(4) +int = +� +1 +(2π)16 +4 +� +i=1 +dωid3⃗ki (2π)4δ(ω1 + ω2 + ω3 + ω4)δ(3)(⃗k1 + ⃗k2 + ⃗k3 + ⃗k4) × +g2� +− 1 +24 +� +fabcfade(δikδjl − δilδjk) + fabdface(δijδkl − δilδjk) ++fabefacd(δijδkl − δikδjl) +� +ab +i(k1)ac +j(k2)ad +k(k3)ae +l (k4) +� +, +(6.31) +from which we can read of the 4-point vertices as +V bcde ijkl +4 aiajakal = −ig2� +fabcfade(δikδjl − δilδjk) + fabdface(δijδkl − δilδjk) ++fabefacd(δijδkl − δikδjl) +� +. +(6.32) +We will study the quantum properties of the non-trivial sectors of CYM in detail in our +a +k1 +k3 +c +k2 +at +ξi +b +ai +(a) V abc +3 ξiataj +a +k1 +k3 +c +k2 +aj +ai +b +ak +(b) V abc +3 aiajak +b +k1 +k4 +e +k2 +aj +ai +c +al +d +ak +k3 +(c) V bcde ijkl +4 aiajakal +a +c +b +ai +k1 +k3 +k2 +(d) V abc +4 ai¯cc +Figure 4: Magnetic Sector Feynman Diagrams +subsequent work using the Feynman rules listed above. After that, we will add the matter +field to Carrollian Yang-Mills and construct a QCD-like structure in the Carrollian theory. +6.5 +Propagators in position space +In this section, we will see propagator of gauge fields in position. In momentum space, the +electric and magnetic sectors’ propagators are (6.6) and (6.12). All the correlation function +of the electric and magnetic sector in position space is listed below +– 27 – + +Electric +Gab +tt (x − y) = +� � dωd3⃗k +(2π)4 ⟨aa +t ab +t⟩e−iωteikixi = δabχ 2πtδ3(⃗x), +(6.33) +Gab +ij (x − y) = +� � dωd3⃗k +(2π)4 ⟨aa +i ab +j⟩e−iωteikixi = δab� +χ 2 +3πt3∂i∂jδ3(⃗x) − δij2πtδ3(⃗x) +� +, (6.34) +Gab +ti (x − y) = +� � dωd3⃗k +(2π)4 ⟨aa +t ab +i⟩e−iωteikixi = δabχ πt2∂iδ3(⃗x). +(6.35) +We can see the propagator is of the form of δ(x) with some time function. This means +there is no propagation in space, only propagation in time. This is the behaviour expected +from electric versions of theories and has been observed e.g. in the theory of scalars and +U(1) gauge fields earlier. +Magnetic +Gab +tt (x − y) = +� � dωd3⃗k +(2π)4 ⟨aa +t ab +t⟩e−iωteikixi == −δab∂t∂tδ(t)4πrΛ, +(6.36) +Gab +ti (x − y) = +� � dωd3⃗k +(2π)4 ⟨aa +t ab +i⟩e−iωteikixi = −δab∂tδ(t)4πxi +r Λ, +(6.37) +Gab +ij (x − y) = +� � dωd3⃗k +(2π)4 ⟨aa +i ab +j⟩e−iωteikixi = 4πδabδ(t){δij +r − xixj +r3 }, +(6.38) +Gab +ti (x − y)ξ = +� � dωd3⃗k +(2π)4 ⟨ξa +i ab +t⟩e−iωteikixi = −2π2δabδ(t)xi +r3 , +(6.39) +Gab +ij (x − y)ξ = +� � dωd3⃗k +(2π)4 ⟨ξa +i ab +j⟩e−iωteikixi = −π +2 δij δ3(⃗x) − 2π3{δij +r3 − 3xixj +r5 +}, (6.40) +Gab +ij (x − y)ξξ = +� � dωd3⃗k +(2π)4 ⟨ξa +i ξb +j⟩e−iωteikixi = δab2πt +� +δij∂2δ3(⃗x) − ∂i∂jδ3(⃗x) +� +. +(6.41) +In the first two propagators, Λ = +� ∞ +0 +sinθ +θ3 dθ, this is a divergent integration. To regularize +it +Λ = +� ∞ +0 +sinθ +θ3 dθ = +� ∞ +0 +1 +θ2 dθ − π +4 = lim +ϵ→0 +� � ∞ +ϵ +1 +θ2 dθ − π +4 +� += lim +ϵ→0 +�1 +ϵ − π +4 +� +(6.42) +Propagators in position space have delta functions in all of the above cases, but some +delta functions are of time, and some are in spatial coordinates. 
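The regularization in Eq.(6.42) is equivalent to the statement that, for a small cutoff eps, the integral of sin(theta)/theta^3 from eps to infinity behaves as 1/eps - pi/4 up to O(eps) corrections, the finite part coming from the convergent integral of (sin(theta) - theta)/theta^3 over (0, infinity), which equals -pi/4. A purely numerical sanity check is sketched below; mpmath is assumed to be available, and the cutoff value eps = 0.01 is an arbitrary illustrative choice.

import mpmath as mp

eps = mp.mpf('0.01')
# integral of sin(t)/t^3 from eps to infinity, evaluated with an oscillatory quadrature
I_eps = mp.quadosc(lambda t: mp.sin(t)/t**3, [eps, mp.inf], period=2*mp.pi)
print(I_eps)               # ~ 99.2163
print(1/eps - mp.pi/4)     # ~ 99.2146, agreeing up to the expected O(eps) correction
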
This mixture of spatial and temporal delta functions is somewhat surprising: one does not expect it in the magnetic sector, where only temporal delta functions are anticipated. This perhaps indicates that not all Carrollian magnetic theories are reducible to lower-dimensional Euclidean CFTs, as was shown for the scalar case in [76]. This point requires further investigation.

7 Conclusions and Discussions

In this paper, we have analyzed the Carrollian limit of Yang-Mills theory systematically and obtained electric and magnetic sectors, with one subsector of each of the electric and magnetic sectors containing non-abelian or self-interaction terms while the other subsector consists of copies of the Carrollian abelian theory. The Carrollian abelian theory found here is consistent with that discussed in [42]. This is the first action formulation for Carrollian Yang-Mills theory. We have obtained the Carrollian Yang-Mills actions by performing a small c-expansion of the Poincaré invariant Yang-Mills action, where we observed that different values of the parameter δ, used in the small c-expansion of the gauge fields, lead to different sectors of the Carrollian Yang-Mills theory. In particular, for δ = 0, we get two non-trivial Carrollian Yang-Mills theories, and for any non-zero value of δ (which we have taken to be δ = 1 for simplicity), we get copies of the Carrollian abelian theory. In four spacetime dimensions, all four sectors are found to be invariant under the infinite CCA. The energy-momentum tensors for all four sectors were calculated and found to be conserved using the equations of motion and the Bianchi identities. We have also calculated Noether charges for all four sectors and found that there are no central extensions in the algebra of the charges. This is unlike the Galilean Yang-Mills theories in [67], where there is a state-dependent central extension in the charge algebra. Finally, we listed all the Feynman rules needed to study the quantum properties of the Carrollian Yang-Mills theory, with a detailed analysis kept for future work. Further, we also calculated the propagators in position space, and from these we explicitly saw the ultra-local behavior of the Carrollian theory.

There are a number of immediate directions for future work. The construction of the different Carrollian Yang-Mills actions is our first step toward the goal of understanding the full quantum properties of Carrollian Yang-Mills theory. In our subsequent work, we will study the quantum structure of CYM theory, free and with matter fields, and investigate the different types of actions we have found here. In [29], the authors studied the algebraic structure of Carrollian supersymmetric theory, and in the near future we want to construct the Carrollian version of N = 4 Super Yang-Mills theory and understand its role in flat space holography. The different actions found in this paper would be a starting point for the supersymmetrization of CYM theory.

Acknowledgments

We would like to first thank Arjun Bagchi for fruitful discussions, necessary suggestions, and valuable comments on the manuscript. We would also like to thank Nilay Kundu, Kedar Kolekar, and Sudipta Dutta for fruitful discussions.
+A +Rotation and Boost invariance +Electric(δ=0) +Rotation:- Under rotation fields transform as +δMijaa(0) +t += (xi∂j − xj∂i)aa(0) +t +(A.1) +δMijaa(0) +k += (xi∂j − xj∂i)aa(0) +k ++ (δikaa(0) +j +− δjkaa(0) +i +) +(A.2) +– 29 – + +The action (4.17) changes under these transformations as +δMijL(0) = ∂j +� +xiEa(0) +k +Ea(0) +k +� +− ∂i +� +xjEa(0) +k +Ea(0) +k +� +(A.3) +The action is rotation invariant. +Boost:- Fields transform as +δBiaa(0) +t += xi∂taa(0) +t ++ q1a(0)a +i +(A.4) +δBiaa(0) +j += xi∂ta(0) +j ++ q2δijaa(0) +t +(A.5) +The action (4.17) changes under these transformations as +δBkL(0) = ∂t +�xk +2 Ea(0) +i +Ea(0) +k +� +(A.6) +So the action is invariant under boost, if the constants q1 and q2 respectively are 0 and 1. +Magnetic(δ=0) +Rotation:- Fields transform as +δMijξa +k = +� +xi∂j − xj∂i +� +xa +k + δikξa +j − δjkξi +(A.7) +δMijaa(0) +t += (xi∂j − xj∂i)aa(0) +t +(A.8) +δMijaa(0) +k += (xi∂j − xj∂i)aa(0) +k ++ (δikaa(0) +j +− δjkaa(0) +i +) +(A.9) +action (4.58) changes as +δMijLNLO = ∂i +� +xjL +� +− ∂j +� +xiL +� +(A.10) +The action is rotation invariant. +Boost:- Fields transform as +δBkaa +t = xk∂taa +t + q1aa +k +(A.11) +δBkaa +i = xk∂taa +i + q2δikaa +t +(A.12) +δBkξa +i = xk∂tξa +i + q3fa +ik +(A.13) +action (4.58) changes as +δBkL = ∂t +� +xkL +� +(A.14) +So the action is invariant under boost, if the constant q1, q2 and q3 respectively are 0, -1 +and 1. +Electric(δ=1) +Rotation:- Fields transform as +δMijaa(0) +t += (xi∂j − xj∂i)aa(0) +t +(A.15) +δMijaa(0) +k += (xi∂j − xj∂i)aa(0) +k ++ (δikaa(0) +j +− δjkaa(0) +i +) +(A.16) +– 30 – + +action (4.1) change as +δMij ˜L(0) = ∂j +�1 +2xi ˜Ea(0) +k +˜Ea(0) +k +� +− ∂i +�1 +2xj ˜Ea(0) +k +˜Ea(0) +k +� +(A.17) +So the action is invariant under rotation. +Boost Fields transform as +δBiaa(0) +t += xi∂taa(0) +t +(A.18) +δBiaa(0) +j += xi∂ta(0) +j ++ δijaa(0) +t +(A.19) +action (4.1) change as +δBi ˜L(0) = ∂t +�1 +2xi ˜Ea(0) +k +˜Ea(0) +k +� +(A.20) +So the action is invariant under boost. +Magnetic(δ=1) +Rotation:- Fields transform as +δMijξa +k = +� +xi∂j − xj∂i +� +xa +k + δikξa +j − δjkξi +(A.21) +δMijaa(0) +t += (xi∂j − xj∂i)aa(0) +t +(A.22) +δMijaa(0) +k += (xi∂j − xj∂i)aa(0) +k ++ (δikaa(0) +j +− δjkaa(0) +i +) +(A.23) +action(4.40) changes as +δMijLNLO = ∂i +� +xjL +� +− ∂j +� +xiL +� +(A.24) +So the action is invariant under rotation. +Boost:- Fields transform as +δBkaa +t = xk∂taa +t +(A.25) +δBkaa +i = xk∂taa +i − δikaa +t +(A.26) +δBkξa +i = xk∂tξa +i + ˜fa +ik +(A.27) +Action(4.40) change as +δBkL = ∂t +� +xkL +� +(A.28) +So the action is invariant under boost. +B +Charge Algebra +In sec.5 have Noether’s charge and discussion on charge algebra. In this appendix, we give +a charge in pre-symplectic language with some examples. +– 31 – + +Electric Sector +Using the expression of Θ, we can define the Poisson bracket for the electric sector as +Ω(δ1, δ2) = +� +d3x +� +δ1Θ(δ2) − δ2Θ(δ1) +� += +� +d3x +� +δ1aa(0) +i +δ2Ea(0) +i +− δ2aa(0) +i +δ1Ea(0) +i +� +(B.1) +Let see some commutation of Conformal Carrollian algebra. +[P, P]: +δPiaa(0) +j += ∂iaa(0) +j +, +δPiaa(0) +t += ∂iaa(0) +t +(B.2) +Using these expression in the Eq.(B.1) we will get +Ω(δPl, δPk) = +� +d3x +� +∂laa(0) +i +∂kEa(0) +i +− ∂kaa(0) +i +∂lEa(0) +i +� += +� +d3x +� +∂l(aa(0) +i +∂kEa(0) +i +) − ∂k(aa(0) +i +∂lEa(0) +i +) +� += 0 +(B.3) +Last equality is zero because of the total derivative in the previous step. 
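Before listing the remaining brackets, a quick cross-check can be made at the level of the underlying spacetime vector fields alone, ignoring the field-dependent pieces of the transformations: one finds [P_l, M_f] = M_{d_l f} and [D, M_f] = M_{x^k d_k f - f}, in agreement with Eq.(5.9) and with the brackets computed below. The following sympy sketch is only such an illustration; f and the test function g are generic symbolic functions.

import sympy as sp

t, x1, x2, x3 = sp.symbols('t x1 x2 x3')
xs = (x1, x2, x3)
f = sp.Function('f')(*xs)
g = sp.Function('g')(t, *xs)

D = lambda phi: t*sp.diff(phi, t) + sum(x*sp.diff(phi, x) for x in xs)   # dilatation
M = lambda h: (lambda phi: h*sp.diff(phi, t))                            # supertranslation M_h
P = lambda i: (lambda phi: sp.diff(phi, xs[i]))                          # spatial translation P_i

Mf = M(f)
l = 0  # check the l = x1 component

# [P_l, M_f] = M_{d_l f}, cf. (B.5)
assert sp.simplify(P(l)(Mf(g)) - Mf(P(l)(g)) - M(sp.diff(f, xs[l]))(g)) == 0

# [D, M_f] = M_{x^k d_k f - f}, cf. (5.9) and (B.6)
h = sum(x*sp.diff(f, x) for x in xs) - f
assert sp.simplify(D(Mf(g)) - Mf(D(g)) - M(h)(g)) == 0
print("Vector-field brackets reproduce h = d_l f and h = x^k d_k f - f.")
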
+[P, M]: +δMf aa(0) +t += f(x)∂taa(0) +t +, +δMf aa(0) +i += f(x)∂taa(0) +i ++ aa(0) +t +∂if(x) +(B.5a) +Ω(δPl, δMf(x)) = +� +d3x +� +∂laa(0) +i +f(x)∂tEa(0) +i +− (f(x)∂taa(0) +i ++ aa(0) +t +∂lf(x))∂lEa(0) +i +� += +� +d3x +� +∂lf(x)Ea(0) +i +Ea(0) +i ++ f(x)∂laa(0) +i +∂tEa(0) +i ++ f(x)aa(0) +t +∂t∂iEa(0) +i +� += Qelectric(Mh) +where +h = ∂lf(x) +(B.5) +[D, M]: +Ω(δD, δMf(x)) = Qelectric(Mh1) +where +h1 = −f(x) + xk∂kf(x) +(B.6) +[Ki, Mf] +Ω(δKi, δMf ) = Qelectric(Mh2), +where +h2 = (2xixk∂k − xkxk∂i − 2xi)f(x) +(B.7) +Expression of the Qelectric is given in Sec.5. +Magnetic Sector +Using the expression of Θ, we can define the Poisson bracket for the magnetic sector as +Ω(δ1, δ2) = +� +d3x +� +δ1Θ(δ2) − δ2Θ(δ1) +� += +� +d3x +� +δ1aa +i δ2ξa +i − δ2aa +i δ1ξa +i +� +(B.8) +Similar to electric case we can see +– 32 – + +[P, P]: +Using these expression in the Eq.(B.1) we will get +Ω(δPl, δPk) = 0 +(B.9) +Last equality is zero because of the total derivative in the previous step. +[P, M] +: Using the transformation given in previous section +Ω(δPl, δMf(x)) = QMagnetic(Mh) +where +h = ∂lf(x) +(B.10) +[D, M]: +Ω(δD, δMf(x)) = QMagnetic(Mh) +where +h1 = −f(x) + xk∂kf(x) +(B.11) +[Ki, Mf] +Ω(δKi, δMf ) = QMagnetic(Mh1), +where +h2 = (2xixk∂k − xkxk∂i − 2xi)f(x) (B.12) +Expression of the Qmagnetic is given in Sec.5. +C +Discussion on previous work on Carrollian Yang-Mills theory +In [15], authors discussed Carrollian Yang-Mills theory at the level of equations of motion. +In their analysis for the SU(2) theory, there are four different sectors of Carrollian Yang- +Mills equations of motion. For the details discussion readers are encouraged to see the +references mentioned above. Here we will do a similar analysis and see how we can relate +our results to the previous analysis. The relativistic equations of motion is +∂µF a +µν + gfabcAb +µF c +µν = DµF a +µν = 0, +(C.1) +we can write temporal and spatial part as +∂iF a +i0 + gfabcAb +iF c +i0 = DiF a +i0 = 0 +(C.2) +∂iF a +ij + gfabcAb +iF c +ij = DiF a +ij = 0. +(C.3) +To derive the Carrollian Yang-Mills equations of motion using the formalism discussed in +[15] we have to scale t, x and all the fields of the theory along with coupling (g) as +xi → ϵβxi, x0 → ϵβ+1t, Aa +i → ϵα+1aa +i , Aa +0 → ϵαaa +t , g → ϵγg, with ϵ → 0 +(C.4) +In this limit the consistent equations of motions are +∂iEa +i + gfabcab +iEc +i = DiEa +i = 0 +(C.5) +∂ifa +ij + gfabcab +ifc +ij = Difa +ij = 0 +(C.6) +if +γ = −(α + β + 1). +– 33 – + +where Ea +i = ∂taa +i − ∂iaa +t + gfabcaa +t aa +i , fa +ij = ∂iaa +j − ∂jaa +i + gfabcaa +i aa +j. These equations of +motion are same as eq.(4.18). The equations of motion we get here by scaling of fields are +reproduced from the electric sector action discussed in the section.4.2. +In sec.4.1 we have another electric sector of Carrollian Yang-Mills theory which is +copies of the electric sector of Carrollian abelian theory. The equations of motion of this +electric sector are computed previously in [15]. +This paper’s magnetic sector equations of motion do not match the previous works done +in [15] because these results derive from the relativistic theory with a Lagrange Multiplier. +References +[1] J. M. Maldacena, The Large N limit of superconformal field theories and supergravity, Adv. +Theor. Math. Phys. 2 (1998) 231 [hep-th/9711200]. +[2] C. Duval, G. Burdet, H. P. Kunzle and M. Perrin, Bargmann Structures and Newton-cartan +Theory, Phys. Rev. D 31 (1985) 1841. +[3] C. Duval and P. A. 
Horvathy, Non-relativistic conformal symmetries and Newton-Cartan +structures, J. Phys. A 42 (2009) 465206 [0904.0531]. +[4] D. Van den Bleeken and C. Yunus, Newton-Cartan, Galileo-Maxwell and Kaluza-Klein, +Class. Quant. Grav. 33 (2016) 137002 [1512.03799]. +[5] E. Bergshoeff, J. Rosseel and T. Zojer, Non-relativistic fields from arbitrary contracting +backgrounds, Class. Quant. Grav. 33 (2016) 175010 [1512.06064]. +[6] D. Hansen, J. Hartong and N. A. Obers, Non-Relativistic Gravity and its Coupling to +Matter, JHEP 06 (2020) 145 [2001.10277]. +[7] D. Hansen, J. Hartong and N. A. Obers, Non-relativistic expansion of the Einstein-Hilbert +Lagrangian, in 15th Marcel Grossmann Meeting on Recent Developments in Theoretical and +Experimental General Relativity, Astrophysics, and Relativistic Field Theories, 5, 2019, +1905.13723. +[8] D. Hansen, J. Hartong, N. A. Obers and G. Oling, Galilean first-order formulation for the +nonrelativistic expansion of general relativity, Phys. Rev. D 104 (2021) L061501 +[2012.01518]. +[9] M. Ergen, E. Hamamci and D. Van den Bleeken, Oddity in nonrelativistic, strong gravity, +Eur. Phys. J. C 80 (2020) 563 [2002.02688]. +[10] L. Susskind, Holography in the flat space limit, AIP Conf. Proc. 493 (1999) 98 +[hep-th/9901079]. +[11] A. Bagchi and R. Fareghbal, BMS/GCA Redux: Towards Flatspace Holography from +Non-Relativistic Symmetries, JHEP 10 (2012) 092 [1203.5795]. +[12] A. Bagchi, Correspondence between Asymptotically Flat Spacetimes and Nonrelativistic +Conformal Field Theories, Phys. Rev. Lett. 105 (2010) 171601 [1006.3354]. +[13] A. Bagchi, R. Basu, D. Grumiller and M. Riegler, Entanglement entropy in Galilean +conformal field theories and flat holography, Phys. Rev. Lett. 114 (2015) 111602 [1410.4089]. +[14] A. Bagchi and R. Basu, 3D Flat Holography: Entropy and Logarithmic Corrections, JHEP +03 (2014) 020 [1312.5748]. +– 34 – + +[15] A. Bagchi, R. Basu, A. Kakkar and A. Mehra, Flat Holography: Aspects of the dual field +theory, JHEP 12 (2016) 147 [1609.06203]. +[16] G. Barnich and G. Compere, Classical central extension for asymptotic symmetries at null +infinity in three spacetime dimensions, Class. Quant. Grav. 24 (2007) F15 [gr-qc/0610130]. +[17] G. Barnich and C. Troessaert, Aspects of the BMS/CFT correspondence, JHEP 05 (2010) +062 [1001.1541]. +[18] G. Barnich, A. Gomberoff and H. A. Gonzalez, The Flat limit of three dimensional +asymptotically anti-de Sitter spacetimes, Phys. Rev. D 86 (2012) 024020 [1204.3288]. +[19] G. Barnich, Entropy of three-dimensional asymptotically flat cosmological solutions, JHEP +10 (2012) 095 [1208.4371]. +[20] L. Ciambelli, C. Marteau, A. C. Petkou, P. M. Petropoulos and K. Siampos, Flat holography +and Carrollian fluids, JHEP 07 (2018) 165 [1802.06809]. +[21] A. Bagchi, A. Banerjee and H. Muraki, Boosting to BMS, 2205.05094. +[22] S. Pasterski, M. Pate and A.-M. Raclariu, Celestial Holography, in 2022 Snowmass Summer +Study, 11, 2021, 2111.11392. +[23] A.-M. Raclariu, Lectures on Celestial Holography, 2107.02075. +[24] S. Pasterski, Lectures on celestial amplitudes, Eur. Phys. J. C 81 (2021) 1062 [2108.04801]. +[25] C. Dappiaggi, BMS field theory and holography in asymptotically flat space-times, JHEP 11 +(2004) 011 [hep-th/0410026]. +[26] C. Dappiaggi, V. Moretti and N. Pinamonti, Rigorous steps towards holography in +asymptotically flat spacetimes, Rev. Math. Phys. 18 (2006) 349 [gr-qc/0506069]. +[27] A. Bagchi, A. Mehra and P. 
Nandi, Field Theories with Conformal Carrollian Symmetry, +JHEP 05 (2019) 108 [1901.10147]. +[28] A. Bagchi, R. Basu, A. Mehra and P. Nandi, Field Theories on Null Manifolds, JHEP 02 +(2020) 141 [1912.09388]. +[29] A. Bagchi, D. Grumiller and P. Nandi, Carrollian superconformal theories and super BMS, +2202.01172. +[30] C. Duval, G. W. Gibbons and P. A. Horvathy, Conformal Carroll groups and BMS +symmetry, Class. Quant. Grav. 31 (2014) 092001 [1402.5894]. +[31] C. Duval, G. W. Gibbons and P. A. Horvathy, Conformal Carroll groups, J. Phys. A47 +(2014) 335204 [1403.4213]. +[32] A. Bagchi, S. Banerjee, R. Basu and S. Dutta, Scattering Amplitudes: Celestial and +Carrollian, 2202.08438. +[33] L. Donnay, A. Fiorucci, Y. Herfray and R. Ruzziconi, A Carrollian Perspective on Celestial +Holography, 2202.04702. +[34] L. Donnay and C. Marteau, Carrollian Physics at the Black Hole Horizon, Class. Quant. +Grav. 36 (2019) 165002 [1903.09654]. +[35] G. Dautcourt, On the ultrarelativistic limit of general relativity, Acta Phys. Polon. B 29 +(1998) 1047 [gr-qc/9801093]. +– 35 – + +[36] J. Hartong, Gauging the Carroll Algebra and Ultra-Relativistic Gravity, JHEP 08 (2015) 069 +[1505.05011]. +[37] E. Bergshoeff, J. Gomis, B. Rollier, J. Rosseel and T. ter Veldhuis, Carroll versus Galilei +Gravity, JHEP 03 (2017) 165 [1701.06156]. +[38] C. Duval, G. W. Gibbons, P. A. Horvathy and P. M. Zhang, Carroll versus Newton and +Galilei: two dual non-Einsteinian concepts of time, Class. Quant. Grav. 31 (2014) 085016 +[1402.0657]. +[39] L. Ciambelli and C. Marteau, Carrollian conservation laws and Ricci-flat gravity, Class. +Quant. Grav. 36 (2019) 085004 [1810.11037]. +[40] K. Morand, Embedding Galilean and Carrollian geometries I. Gravitational waves, J. Math. +Phys. 61 (2020) 082502 [1811.12681]. +[41] L. Ciambelli, R. G. Leigh, C. Marteau and P. M. Petropoulos, Carroll Structures, Null +Geometry and Conformal Isometries, Phys. Rev. D 100 (2019) 046010 [1905.02221]. +[42] J. de Boer, J. Hartong, N. A. Obers, W. Sybesma and S. Vandoren, Carroll symmetry, dark +energy and inflation, 2110.02319. +[43] L. Ciambelli, C. Marteau, A. C. Petkou, P. M. Petropoulos and K. Siampos, Covariant +Galilean versus Carrollian hydrodynamics from relativistic fluids, Class. Quant. Grav. 35 +(2018) 165001 [1802.05286]. +[44] A. C. Petkou, P. M. Petropoulos, D. R. Betancour and K. Siampos, Relativistic Fluids, +Hydrodynamic Frames and their Galilean versus Carrollian Avatars, 2205.09142. +[45] L. Freidel and P. Jai-akson, Carrollian hydrodynamics from symmetries, 2209.03328. +[46] L. Freidel and P. Jai-akson, Carrollian hydrodynamics and symplectic structure on stretched +horizons, 2211.06415. +[47] J. Redondo-Yuste and L. Lehner, Non-linear black hole dynamics and Carrollian fluids, +2212.06175. +[48] R. M. Nandkishore and M. Hermele, Fractons, Ann. Rev. Condensed Matter Phys. 10 (2019) +295 [1803.11196]. +[49] L. Bidussi, J. Hartong, E. Have, J. Musaeus and S. Prohazka, Fractons, dipole symmetries +and curved spacetime, 2111.03668. +[50] A. P´erez and S. Prohazka, Asymptotic symmetries and soft charges of fractons, 2203.02817. +[51] A. Bagchi, A. Banerjee, R. Basu, M. Islam and S. Mondal, Magic Fermions: Carroll and +Flat Bands, 2211.11640. +[52] D. Hansen, N. A. Obers, G. Oling and B. T. Søgaard, Carroll Expansion of General +Relativity, 2112.12684. +[53] A. Bagchi, Tensionless Strings and Galilean Conformal Algebra, JHEP 05 (2013) 141 +[1303.0291]. +[54] A. Bagchi, S. Chakrabortty and P. 
Parekh, Tensionless Strings from Worldsheet Symmetries, +JHEP 01 (2016) 158 [1507.04361]. +[55] A. Bagchi, A. Banerjee, S. Chakrabortty, S. Dutta and P. Parekh, A tale of three — +tensionless strings and vacuum structure, JHEP 04 (2020) 061 [2001.00354]. +– 36 – + +[56] A. Bagchi, M. Mandlik and P. Sharma, Tensionless tales: vacua and critical dimensions, +JHEP 08 (2021) 054 [2105.09682]. +[57] A. Bagchi, A. Banerjee, S. Chakrabortty and R. Chatterjee, A Rindler Road to Carrollian +Worldsheets, 2111.01172. +[58] A. Banerjee, S. Dutta and S. Mondal, Carroll fermions in two dimensions, 2211.11639. +[59] Z.-f. Yu and B. Chen, Free field realization of the BMS Ising model, 2211.06926. +[60] P.-X. Hao, W. Song, Z. Xiao and X. Xie, A BMS-invariant free fermion model, 2211.06927. +[61] M. L. Bellac and J.-M. Levy-Leblond, Galilean Electromagnetism, Nuovo Cimento. 14B +(1973) . +[62] A. Bagchi, R. Basu and A. Mehra, Galilean Conformal Electrodynamics, JHEP 11 (2014) +061 [1408.0810]. +[63] A. Bagchi, R. Basu, A. Kakkar and A. Mehra, Galilean Yang-Mills Theory, JHEP 04 (2016) +051 [1512.08375]. +[64] A. Bagchi, J. Chakrabortty and A. Mehra, Galilean Field Theories and Conformal Structure, +JHEP 04 (2018) 144 [1712.05631]. +[65] G. Festuccia, D. Hansen, J. Hartong and N. A. Obers, Symmetries and Couplings of +Non-Relativistic Electrodynamics, JHEP 11 (2016) 037 [1607.01753]. +[66] K. Banerjee, R. Basu and A. Mohan, Uniqueness of Galilean Conformal Electrodynamics and +its Dynamical Structure, JHEP 11 (2019) 041 [1909.11993]. +[67] A. Bagchi, R. Basu, M. Islam, K. S. Kolekar and A. Mehra, Galilean Gauge Theories from +Null Reductions, 2201.12629. +[68] S. Chapman, L. Di Pietro, K. T. Grosvenor and Z. Yan, Renormalization of Galilean +Electrodynamics, JHEP 10 (2020) 195 [2007.03033]. +[69] K. Banerjee and A. Sharma, Quantization of Interacting Galilean Field theories, 2205.01918. +[70] L. Leblond, Une nouvelle limite non-relativiste du group de Poincar´e, Annales Poincare +Phys.Theor. 3 (1965) 1 . +[71] N. Sen Gupta, On an Analogue of the Galileo Group, Nuovo Cim. 54 (1966) 512 • DOI: +10.1007/BF02740871 . +[72] R. Basu and U. N. Chowdhury, Dynamical structure of Carrollian Electrodynamics, JHEP +04 (2018) 111 [1802.09366]. +[73] K. Banerjee, R. Basu, A. Mehra, A. Mohan and A. Sharma, Interacting Conformal Carrollian +Theories: Cues from Electrodynamics, Phys. Rev. D 103 (2021) 105001 [2008.02829]. +[74] M. Henneaux and P. Salgado-Rebolledo, Carroll contractions of Lorentz-invariant theories, +JHEP 11 (2021) 180 [2109.06708]. +[75] J. Levy-Leblond, Une nouvelle limite non-relativiste du group de Poincare, Ann. Inst. H. +Poincare 3 (1965) 1. +[76] S. Baiguera, G. Oling, W. Sybesma and B. T. Søgaard, Conformal Carroll Scalars with +Boosts, 2207.03468. +– 37 – + diff --git a/vtAzT4oBgHgl3EQfB_rN/content/tmp_files/load_file.txt b/vtAzT4oBgHgl3EQfB_rN/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..bf377711af1ccc784b1f028facc3d54de24f33f7 --- /dev/null +++ b/vtAzT4oBgHgl3EQfB_rN/content/tmp_files/load_file.txt @@ -0,0 +1,1451 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf,len=1450 +page_content='Carrollian Yang-Mills Theory Minhajul Islam Indian Institute of Technology Kanpur, Kalyanpur, Kanpur 208016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' INDIA.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' E-mail: minhajul@iitk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='in Abstract: By doing a small c (speed of light) expansion of SU(N) Yang-Mills fields, we construct two different electric and two different magnetic sectors actions of Carrollian Yang-Mills theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' For both electric and magnetic cases, one sector contains non-trivial self-interaction, and another is N2 −1 copies of respective sector Carrollian abelian theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In d = 4 , all the four sectors are invariant under infinite Carrollian Conformal symmetry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' There are no central extensions when analyzing charge algebra at the phase space level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Lastly, we compute propagators for all four sectors and vertices for two non-trivial sectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Propagators in position space show ultra-local behavior.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='00953v1 [hep-th] 3 Jan 2023 Contents 1 Introduction 1 2 Carrollian Conformal Algebra and Representation 4 3 Yang-Mills action and small c-expansion 6 4 Carrollian Yang-Mills actions 9 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='1 Electric Action I 9 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='2 Electric Action II 11 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='3 Magnetic Action I 14 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='4 Magnetic Action II 17 5 Noether charges and Charge algebra 20 6 Propagator and Vertices 21 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='1 Electric Sector I 22 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='2 Magnetic sector I 23 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='3 Electric sector II 24 6.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='4 Magnetic sector II 25 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='5 Propagators in position space 27 7 Conclusions and Discussions 28 A Rotation and Boost invariance 29 B Charge Algebra 31 C Discussion on previous work on Carrollian Yang-Mills theory 33 1 Introduction The construction of the spectacularly successful Standard Model of particle physics, which describes nature around us, is based on the foundation of relativistic quantum field theory (QFT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' But, often, to describe real life systems, it is desirable to look at approximations and limits of the more fundamental theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Gauge theories are the backbone of theoretical physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Three of the four fundamen- tal forces of nature are explained by Yang-Mills theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Even the first example of the most promising formalism to understand Quantum gravity, called AdS/CFT holographic duality, is constructed using a supersymmetric version of Yang-Mills theory [1] .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The – 1 – AdS/CFT holographic duality relates the d + 1-dimensional gravitational theory to the d-dimensional field theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' More specifically, [1] connects a string theory living on five- dimensional Anti-de Sitter (AdS) spacetimes (times a five-sphere) and N = 4 SU(N) Su- persymmetric Yang-Mills (SYM) theory which is a four-dimensional conformal field theory living on the boundary of AdS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In this paper, we will look at Yang-Mills theories from a different perspective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' We will attempt to understand the theory in the limit when the speed of light goes to zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The diametrically opposite limit, where c → ∞ is clearly of physical interest as it describes Galilean or non-relativistic (NR) physics, and is useful to describe a range of day to day physical systems like hydrodynamics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Below we clarify why the other limit, called the Carrollian limit, is important.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' If we adopt a group-theoretic approach to understand QFT at these two different (Galilean and Carrollian) limits, we would begin from the Poincar´e algebra and take the large c (speed of light) limit and small c limit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The two symmetry algebras that would be obtained as a result are different and are the familiar Galilean algebra, and the not- so-familiar Carrollian algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In both these limits, many interesting counter-intuitive concepts emerge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In both cases, spacetime metrics degenerate, light-cones open up for non- relativistic theory and close up for Carrollian theory, and symmetry algebra gets enhanced.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Non-relativistic theories, corresponding to c → ∞, are important for condensed matter physics, non-AdS holography, and hydrodynamics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In this limit, as mentioned previously the metric degenerates, spacetime loses its Reimmanian structure, and a new spacetime structure emerges called Newton-Cartan spacetime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Selected references on the construction of non-relativistic field theories and related Newton-Cartan spacetime structures are [2– 6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In connection with the construction of symmetries, one of the interesting techniques to construct non-relativistic physics is to start from a Poincar´e invariant theory and do a large c-expansion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Using this approach we get many interesting insights into non-relativistic physics like order-wise enhanced symmetry algebra, and actions [6–9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Our main focus in this paper is the other limit corresponding to c → 0, which is called the Carrollian limit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' At first sight, sending the speed of light to zero may seem unnatural and the expectation is that this would lead to unphysical models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' But recently, this particular limit has been resurgent with different applications, mainly connected to the understanding of flat space holography [10].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' As mentioned before, one of the most promising tools to understand Quantum gravity is the AdS/CFT duality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In the limit of infinite radius of curvature, AdS spacetime become flat spacetime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' On the dual side, the infinite radius limit corresponds to sending the speed of light to zero [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The boundary theory thus becomes a Carrollian conformal field theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Some important references for holography for asymptotically flat spacetime are [10–21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The understanding of flat space holography recently has taken two different directions, viz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Celestial holography and Car- rollian holography.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Celestial holography relates gravity in 4d asymptotically flat spacetimes to a 2d CFT living on the celestial sphere [22–24].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' On the other hand, Carrollian hologra- phy relates 4d asymptotically flat gravity to 3d Carrollian CFTs living on the entire null – 2 – boundary of 4d bulk spacetime [15, 25–31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Recently, some fascinating works have been done to connect both formalisms [32, 33].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The most successful example of AdS/CFT is the original Maldacena correspondence relating N = 4 SU(N) Supersymmetric Yang-Mills theory in d = 4 to gravity in AdS5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' One of our long-term goals is to understand the flatspace version of the Maldacena corre- spondence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' As an important intermediate step, we wish to construct the Carrollian version of Super-Yang-Mills theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' This is the main motivation for constructing Carrollian Yang- Mills (CYM) theory and, in particular, actions for CYM in this paper.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Carrollian physics has also emerged in other interesting places and here we quickly summarize these exciting developments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Carrollian structure appear on any null hyper- surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Every black hole solutions of general relativity contains a horizon that is nothing but a null surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Carrollian structures on black hole horizons have been considered in [34].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Carrollian gravity may provide a tractable limit of general relativity and be useful for various physical context.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' This has been studied in [35–41].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Carroll theory is also important for cosmology, inflation [42], fluid mechanics [20, 39, 41, 43–47], fractons [48– 50], flat physics in condensed matter systems [51].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Inspired by large c-expansion and construction of non-relativistic physics, small c-expansion was introduced to understand Carrollian physics in [52].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Finally, the Carrollian limit of the string theory worldsheet leads to the very high energy tensionless regime of strings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' This has been investigated in detail in [53–57].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Recently there has been some interesting work done on Carroll fermions [51, 58–60].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Before moving on to Carrollian gauge theories, which will be the focus in this paper, we briefly recall previous works on Galilean gauge theories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Galilean gauge theory for U(1) theory was first constructed long ago [61].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In [62–64] authors realized infinite-dimensional Galilean conformal symmetry at the level of equations of motion in Galilean abelian and Galilean Yang-Mills theory.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Subsequently there is some detailed work on action construc- tions for both Galilean abelian [65, 66] and Yang-Mills theory [67].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Quantum properties of Galilean scalar electrodynamics were studied in [68] and that of Galilean QED in [69].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The Carrollian algebra was first discussed in [70, 71].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' More recently Carroll confor- mal structures have been analyzed at the level equations of motion in [15, 27–29].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In [72] Carrollian action was constructed for the so-called electric abelian theory, which is an in- teracting field theory with scalar field [42, 73].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Using the small c-expansion, the magnetic sector of Carrollian abelian theory has been recently constructed [42].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The conformal struc- ture of this magnetic action was analyzed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In [74] authors constructed off-shell Carrollian Yang-Mills theory in the Hamiltonian formulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' However, at present there is no action formulation for the Carrollian Yang-Mills theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' In this paper, we construct Carrollian Yang-Mills actions using the small c-expansion technique.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' We find four different sectors of Carrollian Yang-Mills theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' This construction depends on the power of c we consider during field expansion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' All four sectors exhibit infi- nite Carrollian conformal invariance in four spacetime dimensions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' The energy-momentum tensors for all four sectors are analyzed, and their conservation is established using equa- – 3 – tions of motion and Bianchi identities.' 
To see the charge algebra, we calculate the charges for all four sectors and show that the symmetry is realized at the level of the charge algebra. We begin our investigation of the quantum properties of the theory and calculate all the propagators and vertices. A detailed quantum mechanical analysis is kept for future work.

Outline of the paper

The paper is organized as follows. We begin in Sec. 2 with a review of the Carrollian conformal algebra (CCA). After that, we discuss an infinite extension of the CCA. In Sec. 3 we address relativistic Yang-Mills theory and its small c-expansion. We take the expansion of the fields as A^a_\mu = \sum_{n=0}^{\infty} c^{\lambda} c^{2n} A^{a(n)}_\mu, where \lambda is a non-negative constant parameter. Using \lambda = 0, we get the electric and the magnetic sectors of CYM with non-trivial self-interaction terms. For any non-zero value of \lambda, we get copies of the abelian electric and the abelian magnetic sectors; among the non-zero values we choose the lowest even integer, two, for reasons explained in detail below. In Sec. 4, we address the details of all the sectors of the CYM action. For each sector, we first give the action in a compact form and write down the equations of motion and the gauge symmetry.
After that, we show its invariance under the infinite CCA in four spacetime dimensions. Finally, we analyze the energy-momentum tensor, its improved version, and its conservation. In Sec. 5, we calculate the Noether charges and check the charge algebra for these actions. In Sec. 6 we briefly discuss the Feynman rules for the propagators and vertices of all four sectors, along with the Feynman diagrams; in this section, we also discuss the propagators in position space. In Sec. 7 we conclude with a summary of our results and a list of future directions.

2 Carrollian Conformal Algebra and Representation

The ultra-relativistic (UR) or Carrollian symmetry can be obtained by performing an Inönü-Wigner contraction on the relativistic conformal generators. The corresponding contraction of the spacetime coordinates for a d-dimensional CFT is described as

x^i \to x^i, \quad t \to \epsilon\, t; \qquad \epsilon \to 0 .  (2.1)

Here, i runs over the spatial coordinates i = 1, ..., d - 1. The above contraction can also be interpreted as taking the limit of vanishing speed of light, c \to 0.
The Carrollian generators are obtained by performing this spacetime contraction on the parent relativistic generators. For example, the Carrollian boost generator is obtained as

B^{\rm rel}_i = -x_i\partial_t - t\partial_i \;\xrightarrow{(2.1)}\; -\tfrac{1}{\epsilon}x_i\partial_t - t\partial_i, \qquad B_i = \lim_{\epsilon\to 0}\epsilon B^{\rm rel}_i = -x_i\partial_t .  (2.2)

The other Carrollian generators are obtained by the same analysis. They are given by

H = \partial_t, \quad B_i = -x_i\partial_t, \quad K_i = -2x_i(t\partial_t + x^j\partial_j) + x^jx_j\partial_i, \quad K = x^ix_i\partial_t,  (2.3a)
D = -(t\partial_t + x^i\partial_i), \quad P_i = \partial_i, \quad J_{ij} = -(x_i\partial_j - x_j\partial_i) .  (2.3b)

These generate the finite conformal Carrollian algebra (f-CCA), which is iso(d, 1) for a d-dimensional field theory [15, 27]:

[J_{ij}, B_k] = \delta_{k[i}B_{j]}, \quad [J_{ij}, P_k] = \delta_{k[i}P_{j]}, \quad [J_{ij}, K_k] = \delta_{k[i}K_{j]}, \quad [B_i, P_j] = \delta_{ij}H,
[B_i, K_j] = \delta_{ij}K, \quad [D, K] = -K, \quad [K, P_i] = 2B_i, \quad [K_i, P_j] = -2D\delta_{ij} - 2J_{ij},
[H, K_i] = 2B_i, \quad [D, H] = H, \quad [D, P_i] = P_i, \quad [D, K_i] = -K_i .  (2.4)

The sub-algebra consisting of the generators {J_{ij}, B_i, P_i, H} forms the c \to 0 limit of the Poincaré algebra, viz. the Carrollian algebra [75]. Unlike the relativistic conformal algebra, even in dimensions greater than two it is possible to give the finite algebra (2.4) an infinite-dimensional lift by introducing time translations with arbitrary spatial dependence,

M_f = f(x^i)\partial_t .  (2.5)

Here, M_f generates the infinite set of supertranslations.
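As a quick consistency check of the explicit generators (2.3) against the algebra (2.4) (a short sketch not spelled out in the text; the remaining brackets work out in the same way), one finds directly

[B_i, P_j] = [-x_i\partial_t, \partial_j] = \delta_{ij}\partial_t = \delta_{ij}H, \qquad [K, P_i] = [x^kx_k\partial_t, \partial_i] = -2x_i\partial_t = 2B_i .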
In the above expression f(x^i) is an arbitrary function of the spatial coordinates x^i, which we restrict to polynomials. We obtain the finite generators of the f-CCA, i.e. M_f = H, B_i, K, when f(x^i) = 1, -x_i, x^kx_k respectively. The supertranslation generators M_f, along with the finite set of generators {B_i, J_{ij}, H, P_i, D, K, K_i}, describe the infinite-dimensional CCA. For d \geq 4 it can be written as [15, 72]:

[P_i, M_f] = M_{\partial_i f}, \qquad [D, M_f] = M_{(-x^i\partial_i f + f)},  (2.6a)
[K_i, M_f] = M_{(2x_i f + x^kx_k\partial_i f - 2x_i x^k\partial_k f)}, \qquad [J_{ij}, M_f] = M_{-x_{[i}\partial_{j]}f} .  (2.6b)

For more details of the algebraic aspects of Carrollian conformal symmetry, the reader is pointed to [15]. In this paper our focus is on spacetime dimension d = 4.
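For illustration (a small check, not in the original text), the brackets (2.6) contain several of the finite brackets of (2.4) as special cases; e.g. taking f = x^kx_k, so that M_f = K, in (2.6a) gives

[D, K] = M_{(-x^i\partial_i(x^kx_k) + x^kx_k)} = M_{-x^kx_k} = -K,

in agreement with (2.4).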
Representation theory

The representation theory of the CCA based on highest weights was first constructed in [15]. Further analysis of the representations, extended to fields of different integer and half-integer spins, was given in [27]. For the CCA, the states are labeled by the eigenvalues of the rotation and dilatation generators. The construction of the representation is summarized below. The Carrollian CFT fields are labeled by scaling dimension \Delta and spin j as

[D, \Phi(0, 0)] = \Delta\,\Phi(0, 0), \qquad [J^2, \Phi(0, 0)] = j(j + 1)\,\Phi(0, 0) .  (2.7)

The action of Carrollian rotations, space- and time-translations on a generic field is given by

[J_{ij}, \Phi(0, 0)] = \Sigma_{ij}\Phi(0, 0), \quad [H, \Phi(t, x^i)] = \partial_t\Phi(t, x^i), \quad [P_i, \Phi(t, x^i)] = \partial_i\Phi(t, x^i) .  (2.8)

The Carrollian conformal primaries are defined as

[K_i, \Phi(0, 0)] = 0, \quad [K, \Phi(0, 0)] = 0, \quad [M_f, \Phi(0, 0)] = 0 \ \text{for polynomial degree} > 1 .  (2.9)

The Carrollian boost acts on a primary non-trivially because the fields are not eigenstates of Carrollian boosts. The transformation of a generic field under Carrollian boosts can be written using the Jacobi identity. The action of the Carroll boost on the fields is

[B_i, \Phi(0, 0)] = r\phi_i + s\sigma_i\varphi + s'\sigma_i\chi + aA_t\delta_{ji} + bA_i + \ldots ,  (2.10)

where \phi, \{\varphi, \chi\}, \{A_t, A_k\} denote the primary fields of different spins (0, 1/2, 1). The constants r, \{s, s'\}, \{a, b\} cannot be determined from the symmetries alone, but can only be fixed through dynamics. One way to determine them is the c \to 0 limit of the dynamics of the corresponding relativistic theory. The above action of the Carroll boost can be generalized to a theory of any spin. We use the conventional way to define a primary field \Phi(t, x^i) of the CCA at any spacetime point away from the origin as

\Phi(t, x) = U\Phi(0, 0)U^{-1}, \quad \text{where } U = e^{-tH - x^iP_i} .  (2.11)
The action of all the generators of the finite and infinite CCA on this generic Carrollian primary \Phi(t, x^i) can be written as

[J_{ij}, \Phi(t, x^i)] = (x_i\partial_j - x_j\partial_i)\Phi(t, x) + \Sigma_{ij}\Phi(t, x^i),  (2.12a)
[B_j, \Phi(t, x^i)] = x_j\partial_t\Phi(t, x) - U[B_j, \Phi(0, 0)]U^{-1},  (2.12b)
[D, \Phi(t, x^i)] = (t\partial_t + x^i\partial_i + \Delta)\Phi(t, x^i),  (2.12c)
[K_j, \Phi(t, x^i)] = (2\Delta x_j + 2x_jt\partial_t + 2x_jx^i\partial_i - 2x^i\Sigma_{ij} - x^ix_i\partial_j)\Phi(t, x) - 2t\,U[B_j, \Phi(0, 0)]U^{-1},  (2.12d)
[M_f, \Phi(t, x)] = f(x^i)\partial_t\Phi(t, x) + \partial_jf\,U[B_j, \Phi(0, 0)]U^{-1} .  (2.12e)

This is a summary of the CCA and its representations, which we use extensively in the following sections. For our examples we will see what the constants appearing in equation (2.10) are.

3 Yang-Mills action and small c-expansion

The Yang-Mills theory in (d + 1) dimensions is described by the action

S_{YM} = \int d^{d+1}x\, \mathcal{L}_{YM} = \int d^{d+1}x \left(-\frac{1}{4}F^{\mu\nu a}F^a_{\mu\nu}\right),  (3.1)

and the equations of motion are

\partial_\mu F^{\mu\nu a} + gf^{abc}A^b_\mu F^{\mu\nu c} = 0,  (3.2)

where a = 1, 2, ..., N^2 - 1. The non-abelian field strength F^a_{\mu\nu} is defined as F^a_{\mu\nu} = \partial_\mu A^a_\nu - \partial_\nu A^a_\mu + gf^{abc}A^b_\mu A^c_\nu. Here A^a_\mu is the gauge field and f^{abc} are the structure constants of the underlying gauge group. If we write the above action making the speed of light c explicit (with x^0 = ct, so that \partial_0 = \frac{1}{c}\partial_t and A_0 = \frac{1}{c}A_t), the resulting action is

S = \int d^{d+1}x \left(+\frac{1}{2c^2}F^a_{ti}F^a_{ti} - \frac{1}{4}F^{ija}F^a_{ij}\right).  (3.3)
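For orientation, (3.3) follows from (3.1) in a couple of lines once the factors of c are restored via x^0 = ct; the short check below assumes the mostly plus signature (-,+,+,+), which is not stated explicitly here:

-\frac{1}{4}F^{\mu\nu a}F^a_{\mu\nu} = -\frac{1}{2}F^{0ia}F^a_{0i} - \frac{1}{4}F^{ija}F^a_{ij} = +\frac{1}{2}F^a_{0i}F^a_{0i} - \frac{1}{4}F^{ija}F^a_{ij},
F^a_{0i} = \partial_0A^a_i - \partial_iA^a_0 + gf^{abc}A^b_0A^c_i = \frac{1}{c}\big(\partial_tA^a_i - \partial_iA^a_t + gf^{abc}A^b_tA^c_i\big) = \frac{1}{c}F^a_{ti},

so that \frac{1}{2}F^a_{0i}F^a_{0i} = \frac{1}{2c^2}F^a_{ti}F^a_{ti}, reproducing (3.3).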
The action is divided into two parts. The first part contains the temporal component of the gauge field (A^a_t) along with the spatial components (A^a_i); the second part depends only on the spatial components (A^a_i) of the gauge field. To proceed with the small c-expansion, we write the gauge fields as an expansion in c,

A^a_t = \sum_{n=0}^{\infty} c^{\lambda}c^{2n}a^{a(n)}_t, \qquad A^a_i = \sum_{n=0}^{\infty} c^{\lambda}c^{2n}a^{a(n)}_i .  (3.4)

Using these expansions, the first part of the Lagrangian is

\frac{1}{2c^2}F^a_{ti}F^a_{ti} = \frac{1}{2}\Big[ c^{2\lambda-2}\sum_{n,m=0}^{\infty}c^{2(n+m)}(\partial_ta^{a(n)}_i - \partial_ia^{a(n)}_t)(\partial_ta^{a(m)}_i - \partial_ia^{a(m)}_t)
 + c^{3\lambda-2}\sum_{n,m,l=0}^{\infty}c^{2(n+m+l)}\,2gf^{abc}(\partial_ta^{a(n)}_i - \partial_ia^{a(n)}_t)a^{b(m)}_t a^{c(l)}_i
 + c^{4\lambda-2}\sum_{n,m,l,p=0}^{\infty}c^{2(n+m+l+p)}\,g^2f^{abc}f^{ade}a^{b(n)}_t a^{c(m)}_i a^{d(l)}_t a^{e(p)}_i \Big] .  (3.5)

If we look only at the first term above, i.e. the abelian case, c^{\lambda} becomes an overall factor and there is only one result for all values of \lambda [42]. However, because of the self-interaction terms of the gauge fields in the second and third terms, we cannot take c^{\lambda} out as an overall factor. This leads to distinct sectors of Carroll-invariant non-abelian gauge theories corresponding to \lambda = 0 and \lambda \neq 0. Similarly, the second part (the fully spatial part) of the Lagrangian is

-\frac{1}{4}F^a_{ij}F^a_{ij} = -\frac{1}{4}\Big[ c^{2\lambda}\sum_{n,m=0}^{\infty}c^{2(n+m)}(\partial_ia^{a(n)}_j - \partial_ja^{a(n)}_i)(\partial_ia^{a(m)}_j - \partial_ja^{a(m)}_i)
 + c^{3\lambda}\sum_{n,m,l=0}^{\infty}c^{2(n+m+l)}\,2gf^{abc}(\partial_ia^{a(n)}_j - \partial_ja^{a(n)}_i)a^{b(m)}_i a^{c(l)}_j
 + c^{4\lambda}\sum_{n,m,l,p=0}^{\infty}c^{2(n+m+l+p)}\,g^2f^{abc}f^{ade}a^{b(n)}_i a^{c(m)}_j a^{d(l)}_i a^{e(p)}_j \Big] .  (3.6)
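To make the statement about the abelian piece concrete (a small check, not in the original text, writing \tilde{E}^{a(n)}_i = \partial_ta^{a(n)}_i - \partial_ia^{a(n)}_t and \tilde{f}^{a(n)}_{ij} = \partial_ia^{a(n)}_j - \partial_ja^{a(n)}_i): dropping the self-interaction terms (g = 0), the whole of (3.5) and (3.6) collapses to

\frac{c^{2\lambda}}{2}\Big[\frac{1}{c^2}\sum_{n,m}c^{2(n+m)}\tilde{E}^{a(n)}_i\tilde{E}^{a(m)}_i - \frac{1}{2}\sum_{n,m}c^{2(n+m)}\tilde{f}^{a(n)}_{ij}\tilde{f}^{a(m)}_{ij}\Big],

so c^{2\lambda} is indeed an overall factor and the resulting tower of Carrollian Lagrangians is the same for every \lambda; it is only the cubic and quartic terms, which scale as c^{3\lambda} and c^{4\lambda}, that distinguish \lambda = 0 from \lambda \neq 0.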
We generally expand the Lagrangian in even powers of c. If the relativistic action did not contain any self-interaction term, there would be no problem: we could simply take c^{\lambda} outside and write the Lagrangian in even powers of c. But in our case, to write the expansions in Eq. (3.5) and Eq. (3.6) in even powers of c, we have to choose \lambda to be an even integer. We thus define \lambda = 2\delta. Then the two parts of the action become

\frac{1}{2c^2}F^a_{ti}F^a_{ti} = \frac{1}{2}\Big[c^{4\delta-2}\sum_{n,m=0}^{\infty}(\,) + c^{6\delta-2}\sum_{n,m,l=0}^{\infty}(\,) + c^{8\delta-2}\sum_{n,m,l,p=0}^{\infty}(\,)\Big],  (3.7a)
-\frac{1}{4}F^a_{ij}F^a_{ij} = -\frac{1}{4}\Big[c^{4\delta}\sum_{n,m=0}^{\infty}(\,) + c^{6\delta}\sum_{n,m,l=0}^{\infty}(\,) + c^{8\delta}\sum_{n,m,l,p=0}^{\infty}(\,)\Big],  (3.7b)

where (\,) is a shorthand for the corresponding terms in Eq. (3.5) and Eq. (3.6). Now every term carries an even power of c. As argued earlier, \lambda = 0 and \lambda \neq 0 (correspondingly \delta = 0 and \delta \neq 0) give two distinct sectors. For \delta = 0, the resultant Carrollian actions describe non-abelian theories, i.e. they include the self-interaction terms, whereas for \delta \neq 0 the resultant Carrollian actions describe copies of the Carrollian abelian theory.
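How the four sectors below emerge from (3.7) can be seen by simple power counting (a brief recap of the counting implicit in the text; the c^{2n} factors from the mode expansion (3.4) are understood inside the sums). For \delta = 0, all three temporal terms sit at order c^{-2} and all three spatial terms at order c^{0} when every mode index vanishes; the coefficient of c^{-2} therefore contains the quadratic, cubic and quartic temporal terms at lowest mode, giving the non-abelian electric Lagrangian (3.8), while the coefficient of c^{0} collects the next mode of the temporal terms together with the lowest-mode spatial terms, giving the magnetic Lagrangian (3.9). For \delta = 1, the temporal terms start at c^{2}, c^{4}, c^{6} and the spatial terms at c^{4}, c^{6}, c^{8}; the coefficient of c^{2} is then only the quadratic temporal term at lowest mode, the free electric Lagrangian (3.10), while the coefficient of c^{4} receives the mixed kinetic term, the cubic interaction at lowest mode and the lowest-mode spatial term, i.e. the magnetic Lagrangian (3.11).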
For \delta = 0, the leading-order Lagrangian, i.e. the coefficient of c^{-2} in (3.7), is

\mathcal{L}^{(0)} = \frac{1}{2}\Big[(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t)(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t) + 2gf^{abc}(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t)a^{b(0)}_t a^{c(0)}_i + g^2f^{abc}f^{ade}a^{b(0)}_t a^{c(0)}_i a^{d(0)}_t a^{e(0)}_i\Big],  (3.8)

and this is called the electric sector. The next-to-leading-order (NLO) Lagrangian (i.e. the coefficient of c^{0} in (3.7)), which is called the magnetic sector, is given by

\mathcal{L}^{(1)} = \big(\partial_ta^{a(1)}_i - \partial_ia^{a(1)}_t\big)E^{a(0)}_i + gf^{abc}\big(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t\big)\big(a^{b(0)}_t a^{c(1)}_i + a^{b(1)}_t a^{c(0)}_i\big)
 + \frac{g^2}{2}f^{abc}f^{ade}\big(a^{b(1)}_t a^{c(0)}_i a^{d(0)}_t a^{e(0)}_i + a^{b(0)}_t a^{c(1)}_i a^{d(0)}_t a^{e(0)}_i + a^{b(0)}_t a^{c(0)}_i a^{d(1)}_t a^{e(0)}_i + a^{b(0)}_t a^{c(0)}_i a^{d(0)}_t a^{e(1)}_i\big) - \frac{1}{4}f^{a(0)}_{ij}f^{a(0)}_{ij},  (3.9)

where E^{a(0)}_i = \partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t + gf^{abc}a^{b(0)}_t a^{c(0)}_i and f^{a(0)}_{ij} = \partial_ia^{a(0)}_j - \partial_ja^{a(0)}_i + gf^{abc}a^{b(0)}_i a^{c(0)}_j. For \delta \neq 0, all values of \delta are equivalent, and thus we take \delta = 1 for simplicity. For \delta = 1, the total Lagrangian in (3.7) has the expansion \mathcal{L} = c^2\tilde{\mathcal{L}}_0 + c^4\tilde{\mathcal{L}}_1 + \ldots, where the leading-order Lagrangian (the coefficient of c^2) is

\tilde{\mathcal{L}}^{(0)} = \frac{1}{2}(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t)(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t),  (3.10)
and the next-to-leading-order Lagrangian (the coefficient of c^4) is

\tilde{\mathcal{L}}^{(1)} = \tilde{E}^{a(1)}_i\tilde{E}^{a(0)}_i + gf^{abc}\tilde{E}^{a(0)}_i a^{b(0)}_t a^{c(0)}_i - \frac{1}{4}\tilde{f}^{a(0)}_{ij}\tilde{f}^{a(0)}_{ij} .  (3.11)

Here \tilde{E}^{a(0)}_i = \partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t, \tilde{E}^{a(1)}_i = \partial_ta^{a(1)}_i - \partial_ia^{a(1)}_t and \tilde{f}^{a(0)}_{ij} = \partial_ia^{a(0)}_j - \partial_ja^{a(0)}_i. Thus, taking \lambda (i.e. \delta) to be zero or non-zero, we have obtained four Lagrangians: two of these constitute the so-called electric sector, and the other two the so-called magnetic sector. In the following sections, we give the details of all four sectors.

4 Carrollian Yang-Mills actions

4.1 Electric Action I

If we take \delta = 1 in equation (3.7), the leading-order Lagrangian (the coefficient of c^2) is given by

\tilde{\mathcal{L}}^{(0)} = \frac{1}{2}(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t)(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t) = \frac{1}{2}\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i,  (4.1)

where \tilde{E}^{a(0)}_i = \partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t. Unlike the \delta = 0 case that we study in the next subsection, where the electric sector contains self-interactions, this sector contains only kinetic terms. The corresponding equations of motion are

\partial_i\partial_ta^{a(0)}_i - \partial_i\partial_ia^{a(0)}_t = \partial_i\tilde{E}^a_i = 0, \qquad \partial_t\partial_ta^{a(0)}_i - \partial_t\partial_ia^{a(0)}_t = \partial_t\tilde{E}^a_i = 0 .  (4.2)

The action and the equations of motion are copies of the electric sector of the Carrollian abelian theory discussed in [42], where the Carrollian symmetries (boost and rotation) were analyzed. Below, we exhibit the action's full infinite Carrollian conformal invariance.
Boost and rotation invariance in our language are presented in Appendix A.

Gauge symmetry

The action here is just copies of the abelian action, so the gauge symmetry is like that of the abelian theory. The transformations are given by

a^{a(0)}_t \to a'^{a(0)}_t = a^{a(0)}_t + \partial_t\alpha^a, \qquad a^{a(0)}_i \to a'^{a(0)}_i = a^{a(0)}_i + \partial_i\alpha^a .  (4.3)

The action is invariant under the above gauge transformation, which is just copies of the abelian gauge transformation.
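Invariance under (4.3) can be seen in one line (a quick check, not spelled out in the text): the abelian field strength itself does not shift,

\delta\tilde{E}^{a(0)}_i = \partial_t(\partial_i\alpha^a) - \partial_i(\partial_t\alpha^a) = 0,

so \tilde{\mathcal{L}}^{(0)} = \frac{1}{2}\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i is manifestly gauge invariant.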
Spacetime symmetries

In the previous paragraph we discussed the gauge symmetry of the action. We now use the action of the CCA to find the spacetime symmetries of the action (4.1). In the representation theory of Sec. 2, we had some undefined constants. The values of these constants depend on the fields of the theory under consideration. For example, the value of the scaling dimension \Delta of the fields is fixed when we impose dilatation invariance of the action; similarly, all the other constants are fixed when we impose the remaining symmetries. All four sectors of the Carrollian Yang-Mills Lagrangian contain four such sets of constants. Now let us discuss the first action stated above. The action is trivially invariant under time and space translations (H, P_i). The invariance of the action under rotations (J_{ij}) and boosts (B_i) is shown in Appendix A. Here we only show the invariance under dilatation (D), spatial special conformal transformations (K_i), and supertranslations (M_f); recall that for particular choices of f the supertranslation operator M_f contains B_i and K.

Dilatation: Using the action of the dilatation operator described in (2.12), we write the transformations of the fields under dilatation. The transformations of a^{a(0)}_t and a^{a(0)}_i are

\delta_D a^{a(0)}_t = (t\partial_t + x^k\partial_k + \Delta_1)a^{a(0)}_t, \qquad \delta_D a^{a(0)}_i = (t\partial_t + x^k\partial_k + \Delta_2)a^{a(0)}_i .  (4.4)

Using these transformations in the action (4.1), we see that the Lagrangian changes as

\delta_D\mathcal{L} = \partial_t\big(t\,\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i\big) + \partial_k\big(x_k\,\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i\big) \quad \text{if } \Delta_1 = \Delta_2 = 1 .  (4.5)

So the action is invariant under dilatation in four spacetime dimensions if the scaling dimension is one for both the temporal and spatial components of the gauge field.
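The intermediate step behind (4.5) is worth recording (a short check under the stated assignment \Delta_1 = \Delta_2 = 1):

\delta_D\tilde{E}^{a(0)}_i = \partial_t\,\delta_D a^{a(0)}_i - \partial_i\,\delta_D a^{a(0)}_t = (t\partial_t + x^k\partial_k + 2)\tilde{E}^{a(0)}_i,

i.e. \tilde{E}^{a(0)}_i carries scaling weight 2, so the quadratic Lagrangian carries weight 4 and its dilatation variation assembles into a total derivative, as displayed in (4.5), precisely in four spacetime dimensions.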
Spatial SCT: Similarly to the above, the transformations of the fields under spatial special conformal transformations are given below, keeping the arbitrary constants introduced in the discussion of the representation theory. The transformations are

\delta_{K_l}a^{a(0)}_t = \big(2x_l + 2x_lt\partial_t + 2x^kx_l\partial_k - x^kx_k\partial_l\big)a^{a(0)}_t + 2tq\,a^{a(0)}_l,  (4.6a)
\delta_{K_l}a^{a(0)}_i = \big(2x_l + 2x_lt\partial_t + 2x^kx_l\partial_k - x^kx_k\partial_l\big)a^{a(0)}_i + 2\delta_{li}x^ka^{a(0)}_k - 2\delta_{lk}x_ia^{a(0)}_k + 2tq'\delta_{li}a^{a(0)}_t .  (4.6b)

Using these transformations in the action (4.1), we see that the Lagrangian changes as

\delta_{K_l}\mathcal{L}^{(0)} = \partial_t\big(x_lt\,\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i\big) + \partial_k\big(x_kx_l\,\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i\big) - \partial_l\big(\tfrac{1}{2}x^kx_k\,\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i\big) .  (4.7)

So the action is invariant under spatial special conformal transformations if q = 0, q' = 1.

Supertranslation: Instead of checking boost and temporal special conformal invariance of the action separately, we check the supertranslation (M_f) invariance of the action. The fields transform under M_f as

\delta_{M_f}a^{a(0)}_t = f(x)\partial_ta^{a(0)}_t, \qquad \delta_{M_f}a^{a(0)}_i = f(x)\partial_ta^{a(0)}_i + a^{a(0)}_t\partial_if(x) .  (4.8a)

Using these transformations in the action (4.1), the Lagrangian changes as

\delta_{M_f}\mathcal{L}^{(0)} = \partial_t\big(f(x)\,\tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i\big) .  (4.9)

So the action is invariant under M_f. Thus we see that the action (4.1) is invariant under the full infinite CCA in four spacetime dimensions.

Energy-Momentum tensor

The components of the energy-momentum tensor for the action (4.1) are given by

T^t_{\ t} = \tilde{E}^{a(0)}_i\partial_ta^{a(0)}_i - \tilde{\mathcal{L}}^{(0)}, \qquad T^t_{\ i} = \tilde{E}^{a(0)}_j\partial_ia^{a(0)}_j,  (4.10)
T^i_{\ t} = -\tilde{E}^{a(0)}_i\partial_ta^{a(0)}_t, \qquad T^i_{\ j} = -\tilde{E}^{a(0)}_i\partial_ja^{a(0)}_t - \delta^i_j\tilde{\mathcal{L}}^{(0)} .  (4.11)
Using the improvement of the energy-momentum tensor defined in [42], the improved energy-momentum tensor in our case is

T^\mu_{\ \nu} = -\frac{\delta\mathcal{L}}{\delta\partial_\mu a^{(0)a}_\alpha}\partial_\nu a^{(0)a}_\alpha + \delta^\mu_\nu\mathcal{L} - \big(\delta^\mu_k\partial_t - \delta^\mu_t\partial_k\big)\big(E^{(0)a}_k a^{(0)a}_\nu\big),  (4.12)

whose components are

T^t_{\ i} = \tilde{E}^{a(0)}_j\tilde{f}^{a(0)}_{ij}, \qquad T^i_{\ t} = 0, \qquad T^t_{\ t} = \frac{1}{2}\tilde{E}^{a(0)}_j\tilde{E}^{a(0)}_j,  (4.13)
T^i_{\ j} = \tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_j - \frac{1}{2}\delta^i_j\tilde{E}^{a(0)}_k\tilde{E}^{a(0)}_k .  (4.14)

We see that this energy-momentum tensor is gauge invariant, traceless, and symmetric under the interchange of spatial indices. The T^i_{\ t} component of the stress tensor is zero, as required by the Carroll symmetries. Below, we check its conservation. The relativistic Bianchi identity for Yang-Mills is given in Eq. (4.32). Here we are considering the Carrollian Yang-Mills theory with \delta = 1, and the Carrollian Bianchi identities for this case are

\partial_t\tilde{f}^a_{jk} + \partial_j\tilde{f}^a_{kt} + \partial_k\tilde{f}^a_{tj} = 0,  (4.15a)
\partial_i\tilde{f}^a_{jk} + \partial_j\tilde{f}^a_{ki} + \partial_k\tilde{f}^a_{ij} = 0 .  (4.15b)

The tilde indicates that there are no interaction terms in the field strength, only the abelian terms. These are just copies of the Carrollian abelian Bianchi identity discussed in [42]. Using the equations of motion and the Carrollian Bianchi identities we find

\partial_tT^t_{\ t} + \partial_iT^i_{\ t} = 0, \quad \text{using (4.2), (4.15a)},  (4.16a)
\partial_tT^t_{\ j} + \partial_iT^i_{\ j} = 0, \quad \text{using (4.2), (4.15b)} .  (4.16b)
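Two of these properties can be verified at a glance (a quick check, using only (4.13), (4.14) and the equations of motion (4.2)):

T^t_{\ t} + T^i_{\ i} = \frac{1}{2}\tilde{E}^{a(0)}_j\tilde{E}^{a(0)}_j + \tilde{E}^{a(0)}_i\tilde{E}^{a(0)}_i - \frac{3}{2}\tilde{E}^{a(0)}_k\tilde{E}^{a(0)}_k = 0 \quad (\text{in three spatial dimensions}),
\partial_tT^t_{\ t} = \tilde{E}^{a(0)}_j\partial_t\tilde{E}^{a(0)}_j = 0 \ \text{by (4.2), and } T^i_{\ t} = 0,

so the tensor is traceless and the first conservation equation holds immediately.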
So the energy-momentum tensor satisfies the conservation equations. We will return to this when discussing the Noether charges and the quantum aspects.

4.2 Electric Action II

The electric sector action Eq. (3.8), which contains the non-abelian terms, can be written in the compact form

\mathcal{L}_0 = \frac{1}{2}\Big[(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t)(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t) + 2gf^{abc}(\partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t)a^{b(0)}_t a^{c(0)}_i + g^2f^{abc}f^{ade}a^{b(0)}_t a^{c(0)}_i a^{d(0)}_t a^{e(0)}_i\Big] = \frac{1}{2}E^{a(0)}_iE^{a(0)}_i,  (4.17)

where E^{a(0)}_i = \partial_ta^{a(0)}_i - \partial_ia^{a(0)}_t + gf^{abc}a^{b(0)}_t a^{c(0)}_i. The equations of motion following from the action are

\partial_iE^{a(0)}_i + gf^{abc}a^{b(0)}_iE^{c(0)}_i = D^{(0)}_iE^{a(0)}_i = 0,  (4.18a)
\partial_tE^{a(0)}_i + gf^{abc}a^{b(0)}_tE^{c(0)}_i = D^{(0)}_tE^{a(0)}_i = 0,  (4.18b)

where D_iO^a = \partial_iO^a + gf^{abc}a^{b(0)}_iO^c and D_tO^a = \partial_tO^a + gf^{abc}a^{b(0)}_tO^c.

Gauge Symmetry

The gauge transformations under which the action (4.17) is invariant are given by

a^{a(0)}_t \to a'^{a(0)}_t = a^{a(0)}_t + \frac{1}{g}\partial_t\alpha^a + f^{abc}a^{b(0)}_t\alpha^c,  (4.19a)
a^{a(0)}_i \to a'^{a(0)}_i = a^{a(0)}_i + \frac{1}{g}\partial_i\alpha^a + f^{abc}a^{b(0)}_i\alpha^c .  (4.19b)
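A quick way to see the invariance (a sketch, to first order in \alpha, following the usual Yang-Mills argument): under (4.19) the electric field strength rotates in the adjoint,

\delta E^{a(0)}_i = f^{abc}E^{b(0)}_i\alpha^c + \mathcal{O}(\alpha^2),

so the combination E^{a(0)}_iE^{a(0)}_i in (4.17) is invariant by the antisymmetry of f^{abc}.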
This gauge transformation is the same as in the parent theory, but it can no longer be written in covariant form as in the relativistic theory, because, as in non-relativistic theories, the metrics of the Carrollian theory are degenerate and time and space are not on the same footing.

Spacetime Symmetries

Dilatation: Using the action of the dilatation operator described in Eq. (2.12), we write the transformations of the fields under dilatation. The transformations of a^{a(0)}_t and a^{a(0)}_i are

\delta_D a^{a(0)}_t = (t\partial_t + x^k\partial_k + \Delta_1)a^{a(0)}_t, \qquad \delta_D a^{a(0)}_i = (t\partial_t + x^k\partial_k + \Delta_2)a^{a(0)}_i .  (4.20)

Using these transformations in the action (4.17), we see that the Lagrangian changes as

\delta_D\mathcal{L} = \partial_t\big(t\,E^{a(0)}_iE^{a(0)}_i\big) + \partial_k\big(x_k\,E^{a(0)}_iE^{a(0)}_i\big) \quad \text{if } \Delta_1 = \Delta_2 = 1 .  (4.21)

If the scaling dimensions \Delta_1 and \Delta_2 of both fields are one, then the action is dilatation invariant in four spacetime dimensions.

Spatial SCT: Similarly to the above, the transformations of the fields under spatial special conformal transformations, with the arbitrary constants introduced in the representation theory, are

\delta_{K_l}a^{a(0)}_t = \big(2x_l + 2x_lt\partial_t + 2x^kx_l\partial_k - x^kx_k\partial_l\big)a^{a(0)}_t + 2tq\,a^{a(0)}_l,  (4.22a)
\delta_{K_l}a^{a(0)}_i = \big(2x_l + 2x_lt\partial_t + 2x^kx_l\partial_k - x^kx_k\partial_l\big)a^{a(0)}_i + 2\delta_{li}x^ka^{a(0)}_k - 2\delta_{lk}x_ia^{a(0)}_k + 2tq'\delta_{li}a^{a(0)}_t .  (4.22b)
Using these transformations in the action (4.17), we see that the Lagrangian changes as

\delta_{K_l}\mathcal{L}^{(0)} = \partial_t\big(x_lt\,E^{a(0)}_iE^{a(0)}_i\big) + \partial_k\big(x_kx_l\,E^{a(0)}_iE^{a(0)}_i\big) - \partial_l\big(\tfrac{1}{2}x^kx_k\,E^{a(0)}_iE^{a(0)}_i\big)  (4.23)

if

q = 0, \qquad q' = 1,  (4.24)

so the action is invariant under spatial special conformal transformations if the constants q and q' are zero and one, respectively.

Supertranslation: Instead of checking boost and temporal special conformal invariance of the action separately, we check the supertranslation (M_f) invariance of the action. The fields transform under M_f as

\delta_{M_f}a^{a(0)}_t = f(x)\partial_ta^{a(0)}_t, \qquad \delta_{M_f}a^{a(0)}_i = f(x)\partial_ta^{a(0)}_i + a^{a(0)}_t\partial_if(x) .  (4.25a)

Using these transformations in the action (4.17), the Lagrangian changes as

\delta_{M_f}\mathcal{L}^{(0)} = \partial_t\big(f(x)\,E^{a(0)}_iE^{a(0)}_i\big),  (4.26)

so the action is invariant under supertranslations (M_f). From the above analysis we conclude that the action (4.17) is invariant under the infinite CCA in four spacetime dimensions if the scaling dimensions of both a^a_t and a^a_i are one.

Energy-Momentum tensor

The leading-order Lagrangian, the so-called electric sector, is thus invariant under the infinite Carrollian conformal algebra in 4d spacetime. Let us now look at the energy-momentum tensor for the action Eq. (4.17) and how it can be improved.
The components of the energy-momentum tensor for the action (4.17) are
$$T^t_{\ i} = E^{a(0)}_j \partial_i a^{a(0)}_j,\qquad T^i_{\ t} = -E^{a(0)}_i \partial_t a^{a(0)}_t,\qquad T^t_{\ t} = E^{a(0)}_i \partial_t a^{a(0)}_i - \mathcal{L}^{(0)}, \qquad (4.27)$$
$$T^i_{\ j} = -E^{a(0)}_i \partial_j a^{a}_t - \delta^i_j\,\mathcal{L}^{(0)}. \qquad (4.28)$$
We can see these are not gauge invariant, the $T^i_{\ j}$ component is not symmetric, and the $T^i_{\ t}$ component is not zero, so we have to improve it. Using the improved energy-momentum tensor defined in [42], the improved energy-momentum tensor for our case is
$$T^\mu_{\ \nu} = -\frac{\delta\mathcal{L}}{\delta\partial_\mu a^{(0)a}_\alpha}\,\partial_\nu a^{(0)a}_\alpha + \delta^\mu_\nu\,\mathcal{L} - \big(\delta^\mu_k\partial_t - \delta^\mu_t\partial_k\big)\big(E^{(0)a}_k a^{(0)a}_\nu\big), \qquad (4.29)$$
and the components of the improved E-M tensor are
$$T^t_{\ i} = E^{a(0)}_j f^{a(0)}_{ij},\qquad T^i_{\ t} = 0,\qquad T^t_{\ t} = \frac{1}{2}E^{a(0)}_j E^{a(0)}_j, \qquad (4.30)$$
$$T^i_{\ j} = E^{a(0)}_i E^{a(0)}_j - \frac{1}{2}\delta^i_j\,E^{a(0)}_k E^{a(0)}_k. \qquad (4.31)$$
We can see the energy-momentum tensor is gauge invariant, traceless, and symmetric under the interchange of spatial indices. The $T^i_{\ t}$ component of the stress tensor is zero as required by Carroll symmetries.
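For completeness, a one-line check of tracelessness (ours, not spelled out in the text), using (4.30), (4.31) and $\delta^i_i = 3$ in four spacetime dimensions:
$$T^\mu_{\ \mu} = T^t_{\ t} + T^i_{\ i} = \frac{1}{2}E^{a(0)}_j E^{a(0)}_j + \Big(E^{a(0)}_i E^{a(0)}_i - \frac{3}{2}E^{a(0)}_k E^{a(0)}_k\Big) = 0.$$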
Below we show the conservation of the energy-momentum tensor. Before doing so, let us record Bianchi's identity in the Carrollian limit. The relativistic Bianchi identity for Yang-Mills theory is
$$D_\nu F^a_{\beta\mu} + D_\beta F^a_{\mu\nu} + D_\mu F^a_{\nu\beta} = 0. \qquad (4.32)$$
When discussing the expansion of the action we considered different values of $\lambda$, and for the two choices we obtain two distinct Bianchi identities. For $\lambda = 0$ (i.e. $\delta = 0$) we get a Bianchi identity with a non-trivial self-interaction term, while for $\lambda = 2$ (i.e. $\delta = 1$) we get copies of the Carrollian abelian Bianchi identity mentioned in the previous section. For $\lambda = 0$ ($\delta = 0$) the Carrollian Bianchi identity reads
$$D_t f^a_{jk} + D_j f^a_{kt} + D_k f^a_{tj} = 0, \qquad (4.33a)$$
$$D_i f^a_{jk} + D_j f^a_{ki} + D_k f^a_{ij} = 0. \qquad (4.33b)$$
Using the equations of motion and the above Carrollian Bianchi identity (4.33), the energy-momentum tensor is conserved:
$$\partial_t T^t_{\ t} + \partial_i T^i_{\ t} = 0, \quad\text{using (4.18b)}, \qquad (4.34)$$
$$\partial_t T^t_{\ j} + \partial_i T^i_{\ j} = 0, \quad\text{using (4.18a), (4.33a)}. \qquad (4.35)$$
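As an illustration of (4.34), a one-line check of ours, assuming that Eq. (4.18b), which is not reproduced here, is the evolution equation $D^{(0)}_t E^{a(0)}_i = \partial_t E^{a(0)}_i + g f^{abc} a^{b(0)}_t E^{c(0)}_i = 0$ obtained by varying $a^{a(0)}_i$: since $T^i_{\ t} = 0$, only the time derivative contributes,
$$\partial_t T^t_{\ t} = E^{a(0)}_i\,\partial_t E^{a(0)}_i = -g f^{abc}\, a^{b(0)}_t\, E^{a(0)}_i E^{c(0)}_i = 0,$$
where the last step uses the antisymmetry of $f^{abc}$ in $a \leftrightarrow c$ against the symmetric combination $E^{a(0)}_i E^{c(0)}_i$.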
This completes our detailed discussion of the electric sector with non-abelian terms; in the next section we focus on the magnetic sector. In the electric sector the temporal components of the field strength are dominant; in the magnetic sector discussed below, the purely spatial components of the field strength dominate and the temporal component behaves as a constraint. We will return to the electric sector when we discuss Noether charges and the quantum aspects of the theory.

4.3 Magnetic Action I

For the $\delta = 1$ case, the next-to-leading-order (NLO) Lagrangian, the so-called magnetic sector, is given in Eq. (3.11). For convenience we write it again here:
$$\mathcal{L}^{(1)} = \tilde{E}^{a(1)}_i \tilde{E}^{a(0)}_i + g f^{abc}\,\tilde{E}^{a(0)}_i a^{b(0)}_t a^{c(0)}_i - \frac{1}{4}\tilde{f}^{a(0)}_{ij}\tilde{f}^{a(0)}_{ij}. \qquad (4.36)$$
Varying the Lagrangian with respect to the next-to-leading-order fields $a^{a(1)}_t,\ a^{a(1)}_i$ reproduces Eq. (4.2), the leading-order equations of motion. Varying the action with respect to the leading-order fields $a^{a(0)}_t,\ a^{a(0)}_i$ gives the equations of motion
$$\partial_i \tilde{E}^{a(1)}_i + g f^{abc}\partial_i\big(a^{b(0)}_t a^{c(0)}_i\big) + g f^{abc} a^{b(0)}_i \tilde{E}^{c(0)}_i = 0, \qquad (4.37a)$$
$$\partial_t \tilde{E}^{a(1)}_i + g f^{abc}\partial_t\big(a^{b(0)}_t a^{c(0)}_i\big) + g f^{abc} a^{b(0)}_t \tilde{E}^{c(0)}_i - \partial_k \tilde{f}^{a(0)}_{ki} = 0. \qquad (4.37b)$$
The action Eq. (4.36) and the above equations of motion are not Carroll invariant.
To make them Carroll invariant we have to impose the constraint $\tilde{E}^{a(0)}_i = 0$ in the action (4.36); the corresponding equation of motion is
$$\partial_k \tilde{f}^{a(0)}_{ki} = 0. \qquad (4.38)$$
As in the $\delta = 0$ case, we derive the Carroll-invariant magnetic sector for $\delta = 1$ by introducing a Lagrange multiplier in the parent action. We start from the relativistic Lagrangian with Lagrange multiplier $\xi^a_i$,
$$\mathcal{L} = -\frac{c^2}{2}\xi^a_i\xi^a_i + \xi^a_i F^a_{0i} - \frac{1}{4}F^a_{ij}F^a_{ij}. \qquad (4.39)$$
From Eq. (3.4) we see that for the $\delta = 1$ case every field is scaled by $c^2$ before the expansion. Scaling every field in the above equation by $c^2$ and collecting the $c^4$ term (for $\delta = 1$ the NLO action sits at order $c^4$ of the expansion (3.7)), the resulting Lagrangian is
$$\tilde{\mathcal{L}}_{\rm NLO} = \xi^a_i\tilde{E}^a_i - \frac{1}{4}\tilde{f}^a_{ij}\tilde{f}^a_{ij}. \qquad (4.40)$$
Varying the Lagrangian with respect to $\xi^a_i$ gives the constraint $\tilde{E}^a_i = 0$. The full set of equations of motion is
$$\tilde{E}^a_i = 0,\qquad \partial_i\xi^a_i = 0,\qquad \partial_t\xi^a_i - \partial_j\tilde{f}^a_{ji} = 0. \qquad (4.41)$$
We discuss its gauge symmetry and full spacetime symmetry below.

Gauge symmetry

The gauge symmetry of this action is not non-abelian.
It reduces to copies of an abelian, i.e. $u(1)$, symmetry. The action is invariant under the gauge transformation
$$a^a_t \to a'^a_t = a^a_t + \partial_t\alpha^a,\qquad a^a_i \to a'^a_i = a^a_i + \partial_i\alpha^a,\qquad \xi^a_i \to \xi'^a_i = \xi^a_i. \qquad (4.42)$$
So the action is symmetric under $n^2 - 1$ copies of the abelian symmetry, and the Lagrange multiplier $\xi^a_i$ behaves as a scalar under the gauge transformation.
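This invariance is immediate from the explicit form of the field strengths at this order (cf. their expressions written out later in Eq. (6.7)), $\tilde{E}^{a}_i = \partial_t a^{a}_i - \partial_i a^{a}_t$ and $\tilde{f}^{a}_{ij} = \partial_i a^{a}_j - \partial_j a^{a}_i$: under (4.42),
$$\delta\tilde{E}^a_i = \partial_t\partial_i\alpha^a - \partial_i\partial_t\alpha^a = 0,\qquad \delta\tilde{f}^a_{ij} = \partial_i\partial_j\alpha^a - \partial_j\partial_i\alpha^a = 0,\qquad \delta\xi^a_i = 0,$$
so each term of (4.40) is separately invariant.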
Spacetime symmetries

The action (4.40) consists of copies of the magnetic sector of the Carrollian abelian theory discussed in [42], where its Carroll symmetry was analyzed. Here we check the Carrollian conformal invariance of the action. The transformations of the fields are the same as for the magnetic-sector fields of the $\delta = 0$ case. In this section we only give how the action changes under dilatation, spatial SCT, and supertranslation; rotation and boost invariance are shown in the appendix.

Dilatation: The transformations of the gauge fields ($a^a_t, a^a_i$) and the Lagrange multiplier ($\xi^a_i$) under the dilatation operator $D$ are
$$\delta_D a^a_t = (t\partial_t + x^k\partial_k + \Delta_1)\,a^a_t,\qquad \delta_D a^a_i = (t\partial_t + x^k\partial_k + \Delta_2)\,a^a_i, \qquad (4.43a)$$
$$\delta_D \xi^a_i = (t\partial_t + x^k\partial_k + \Delta_\xi)\,\xi^a_i. \qquad (4.43b)$$
Using these transformations in (4.40), the action changes as
$$\delta_D\mathcal{L} = \partial_t\big(t\,\tilde{E}^a_i\tilde{E}^a_i\big) + \partial_k\big(x_k\,\tilde{E}^a_i\tilde{E}^a_i\big) + \partial_t\Big(-\frac{1}{4}\tilde{f}^a_{ij}\tilde{f}^a_{ij}\Big) + \partial_k\Big(-\frac{1}{4}\tilde{f}^a_{ij}\tilde{f}^a_{ij}\Big) \quad\text{if } \Delta_1 = \Delta_2 = 1 \text{ and } \Delta_\xi = 2, \qquad (4.44)$$
so the action is dilatation invariant in four spacetime dimensions if the scaling dimensions of the temporal and spatial components of the gauge field are one and the scaling dimension of $\xi_i$ is two.

Spatial SCT: The transformations of the fields $a^a_t$, $a^a_i$ and $\xi^a_i$ under spatial SCT are
$$\delta_{K_l} a^a_t = \big(2x_l + 2x_l t\partial_t + 2x_k x_l\partial_k - x_k x_k\partial_l\big)\,a^a_t + 2tq\,a^a_l, \qquad (4.45a)$$
$$\delta_{K_l} a^a_i = \big(2x_l + 2x_l t\partial_t + 2x_k x_l\partial_k - x_k x_k\partial_l\big)\,a^a_i + 2\delta_{li}x_k a^a_k - 2\delta_{lk}x_i a^a_k + 2tq'\delta_{li}\,a^a_t, \qquad (4.45b)$$
$$\delta_{K_l} \xi^a_i = \big(4x_l + 2x_l t\partial_t + 2x_k x_l\partial_k - x_k x_k\partial_l\big)\,\xi^a_i + 2\delta_{li}x_k \xi^a_k - 2\delta_{lk}x_i \xi^a_k + 2tq''\delta_{li}\,a^a_t + 2q'''t\,\tilde{f}^a_{il}. \qquad (4.45c)$$
Using these transformations in (4.40), the action changes as
$$\delta_{K_l}\mathcal{L}^{(0)} = \partial_t\big(2x_l t\,\xi^a_i\tilde{E}^{a(0)}_i\big) - \partial_t\Big(\frac{1}{2}t x_l\,\tilde{f}^a_{ij}\tilde{f}^a_{ij}\Big) + \partial_k\big(2x_k x_l\,\xi^a_i\tilde{E}^a_i\big) - \partial_k\Big(\frac{1}{2}x_k x_l\,\tilde{f}^a_{ij}\tilde{f}^a_{ij}\Big) - \partial_l\big(x_k x_k\,\xi^a_i\tilde{E}^{a(0)}_i\big) + \partial_l\Big(\frac{1}{4}x_k x_k\,\tilde{f}^a_{ij}\tilde{f}^a_{ij}\Big), \quad\text{if } q = 0,\ q' = 1,\ q'' = 0,\ q''' = -1, \qquad (4.46)$$
so the action is invariant under spatial special conformal transformations.

Supertranslation: Finally, we check invariance under supertranslations ($M_f$), which contain the Hamiltonian, the temporal conformal transformation, and the boost operator for different choices of $f$. Under this operator the fields transform as
$$\delta_{M_f} a^a_t = f(x)\,\partial_t a^a_t,\qquad \delta_{M_f} a^a_i = f(x)\,\partial_t a^a_i - a^a_t\,\partial_i f(x),\qquad \delta_{M_f}\xi^a_i = f(x)\,\partial_t\xi^a_i - \tilde{f}^a_{ik}\,\partial_k f(x). \qquad (4.47)$$
Using these in (4.40), the action changes as
$$\delta_{M_f}\mathcal{L}^{(0)} = \partial_t\Big(f(x)\,\xi^a_i\tilde{E}^{a(0)}_i - \frac{1}{4}\tilde{f}^a_{ij}\tilde{f}^a_{ij}\Big) + \partial_i\Big(-\frac{1}{2}\tilde{f}^a_{ij}\tilde{E}^a_j\Big) + \partial_j\Big(\frac{1}{2}\tilde{f}^a_{ij}\tilde{E}^a_i\Big). \qquad (4.48)$$
So the action is invariant under $M_f$. Hence the magnetic sector of the $\delta = 1$ case is invariant under the infinite CCA in 4d spacetime.

Energy-Momentum tensor

We now turn to the energy-momentum tensor and its conservation. Deriving the energy-momentum tensor from (4.40) gives
$$T^t_{\ i} = \xi^a_k\partial_i a^a_k,\qquad T^i_{\ t} = -\xi^a_i\partial_t a^a_t - \tilde{f}^a_{ik}\partial_t a^a_k, \qquad (4.49)$$
$$T^t_{\ t} = \xi^a_i\partial_t a^a_i - \mathcal{L},\qquad T^i_{\ j} = -\xi^a_i\partial_j a^a_t - \tilde{f}^a_{ik}\partial_j a^a_k - \delta^i_j\,\mathcal{L}. \qquad (4.50)$$
As in the electric sector, an improved energy-momentum tensor is needed. Following [42], the improved energy-momentum tensor for the magnetic sector is
$$T^\mu_{\ \nu} = -\frac{\delta\mathcal{L}}{\delta\partial_\mu a^{a}_\alpha}\,\partial_\nu a^{a}_\alpha + \delta^\mu_\nu\,\mathcal{L} - \delta^\mu_t\,\partial_i\big(\xi^a_i a^a_\nu\big) + \delta^\mu_i\Big(\partial_t\big(\xi^a_i a^a_\nu\big) + \partial_j\big(\tilde{f}^a_{ij} a^a_\nu\big)\Big). \qquad (4.51)$$
Written out component-wise,
$$T^t_{\ i} = \xi^a_k\tilde{f}^a_{ik},\qquad T^i_{\ t} = 0,\qquad T^t_{\ t} = \frac{1}{4}\tilde{f}^a_{ij}\tilde{f}^a_{ij},\qquad T^i_{\ j} = -\tilde{f}^a_{ik}\tilde{f}^a_{jk} - \delta^i_j\,\mathcal{L}. \qquad (4.52)$$
This energy-momentum tensor is gauge invariant, traceless, symmetric in its spatial indices, and has $T^i_{\ t} = 0$, as expected for a Carroll theory. Using the equations of motion and the Carrollian Bianchi identity we find
$$\partial_t T^t_{\ t} + \partial_i T^i_{\ t} = 0, \quad\text{using (4.41), (4.33a)}, \qquad (4.53a)$$
$$\partial_t T^t_{\ j} + \partial_i T^i_{\ j} = 0, \quad\text{using (4.41), (4.33b)}, \qquad (4.53b)$$
so the energy-momentum tensor satisfies the conservation equations.

4.4 Magnetic Action II

In this section we study the NLO Lagrangian, the so-called magnetic sector, in detail. The NLO Lagrangian contains both leading-order and NLO fields. From the section on the expansion of the action, the NLO Lagrangian (the coefficient of $c^0$) is given in Eq. (3.9). In that form the action is cumbersome to analyze, but thanks to Jacobi's identity,
$$f^{bca}f^{dae} + f^{dba}f^{cae} + f^{cda}f^{bae} = 0, \qquad (4.54)$$
it can be simplified. Using this identity and some algebra, the NLO Lagrangian can be written as
$$\mathcal{L}^{(1)} = \big(D^{(0)}_t a^{a(1)}_i\big)E^{a(0)}_i - \big(D^{(0)}_i a^{a(1)}_t\big)E^{a(0)}_i - \frac{1}{4}f^{a(0)}_{ij}f^{a(0)}_{ij}. \qquad (4.55)$$
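The index arrangement of (4.54) can be checked numerically for a concrete gauge group; the snippet below is a small illustration of ours for su(2), where the structure constants are the Levi-Civita symbol.

# Numerical check of the Jacobi identity (4.54),
#   f^{bca} f^{dae} + f^{dba} f^{cae} + f^{cda} f^{bae} = 0,
# for su(2), where f^{abc} = eps_{abc}.
import numpy as np

eps = np.zeros((3, 3, 3))
eps[0, 1, 2] = eps[1, 2, 0] = eps[2, 0, 1] = 1.0
eps[0, 2, 1] = eps[2, 1, 0] = eps[1, 0, 2] = -1.0

# sum over the repeated index a for every choice of b, c, d, e
jacobi = (np.einsum('bca,dae->bcde', eps, eps)
          + np.einsum('dba,cae->bcde', eps, eps)
          + np.einsum('cda,bae->bcde', eps, eps))

print(np.abs(jacobi).max())   # 0.0: the identity holds component by component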
Varying this Lagrangian with respect to the next-to-leading-order fields $a^{a(1)}_t,\ a^{a(1)}_i$ reproduces Eq. (4.18), the leading-order equations of motion, as a property of this formalism. Varying with respect to the leading-order fields ($a^{a(0)}_t,\ a^{a(0)}_i$) gives the equations of motion
$$D^{(0)}_i D^{(0)}_i a^{a(1)}_t - D^{(0)}_i D^{(0)}_t a^{a(1)}_i - g f^{abc} a^{b(1)}_i E^{c(0)}_i = 0, \qquad (4.56a)$$
$$D^{(0)}_t D^{(0)}_t a^{a(1)}_i - D^{(0)}_t D^{(0)}_i a^{a(1)}_t - g f^{abc} a^{b(1)}_t E^{c(0)}_i - D^{(0)}_k f^{a(0)}_{ki} = 0, \qquad (4.56b)$$
where $D^{(0)}_k f^{a(0)}_{ki} = \partial_k f^{a(0)}_{ki} + g f^{abc} a^{b(0)}_k f^{c(0)}_{ki}$. Although the action and the equations of motion look compact, they are not Carroll invariant. To make them Carroll invariant we have to impose the constraint $E^{a(0)}_i = 0$ at the level of the action Eq. (4.55); the action then reduces to $-\frac{1}{4}f^{a(0)}_{ij}f^{a(0)}_{ij}$ and the equation of motion becomes $D^{(0)}_k f^{a(0)}_{ki} = 0$.

We can derive the Carroll-invariant magnetic sector from the relativistic Yang-Mills action by introducing a Lagrange multiplier in the relativistic Lagrangian and then taking the speed-of-light-to-zero limit. The relativistic Lagrangian with Lagrange multiplier $\xi^a_i$ and explicit factors of $c$ is
$$\mathcal{L} = -\frac{c^2}{2}\xi^a_i\xi^a_i + \xi^a_i F^a_{0i} - \frac{1}{4}F^a_{ij}F^a_{ij}. \qquad (4.57)$$
From here we recover the usual Yang-Mills action by integrating out the $\xi_i$ fields. Taking the small-$c$ limit instead, we get
$$\mathcal{L}_{\rm NLO} = \xi^a_i\big(\partial_t a^{a(0)}_i - \partial_i a^{a(0)}_t\big) - \frac{1}{4}\big(\partial_i a^a_j - \partial_j a^a_i\big)\big(\partial_i a^a_j - \partial_j a^a_i\big) + g f^{abc} a^b_t a^c_i\,\xi^a_i - g f^{abc} a^b_i a^c_j\,\partial_i a^a_j - \frac{1}{4}g^2 f^{abc}f^{ade} a^b_i a^c_j a^d_i a^e_j = \xi^a_i E^a_i - \frac{1}{4}f^a_{ij}f^a_{ij}. \qquad (4.58)$$
This Lagrangian contains non-trivial self-interaction, i.e. non-abelian, terms.
The equations of motion of this action are
$$E^a_i = 0,\qquad D_i\xi^a_i = 0,\qquad D_t\xi_i - D_j f_{ji} = 0. \qquad (4.59)$$
Here the constraint $E^{a(0)}_i = 0$ arises as the equation of motion for the Lagrange multiplier $\xi^a_i$. Below we examine the full spacetime symmetry of this action.

Gauge symmetry

Before turning to the spacetime symmetries, it is worth checking the gauge symmetry. The action Eq. (4.58) is invariant under the gauge transformation
$$a^a_t \to a'^a_t = a^a_t + \frac{1}{g}\partial_t\alpha^a + f^{abc} a^b_t\alpha^c, \qquad (4.60a)$$
$$a^a_i \to a'^a_i = a^a_i + \frac{1}{g}\partial_i\alpha^a + f^{abc} a^b_i\alpha^c, \qquad (4.60b)$$
$$\xi^a_i \to \xi'^a_i = \xi^a_i + f^{abc}\xi^b_i\alpha^c. \qquad (4.60c)$$
The temporal and spatial components of the gauge field transform in the same way as in the electric sector, and the Lagrange multiplier $\xi^a_i$ transforms as a scalar in the adjoint representation of the underlying gauge group.
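A quick consistency check of ours: to first order in $\alpha^a$ both $\xi^a_i$ and $E^a_i$ rotate in the adjoint under (4.60) ($E^a_i$ being the leading piece of $F^a_{0i}$), so
$$\delta\big(\xi^a_i E^a_i\big) = f^{abc}\xi^b_i\alpha^c\,E^a_i + \xi^a_i\,f^{abc}E^b_i\alpha^c = \big(f^{abc} + f^{bac}\big)\,\xi^b_i E^a_i\,\alpha^c = 0,$$
and the $-\frac{1}{4}f^a_{ij}f^a_{ij}$ term is invariant by the same argument, since $f^a_{ij}$ also transforms in the adjoint.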
Spacetime symmetries

As for the electric sector discussed above, we check the symmetry of the action under dilatation ($D$), spatial SCT ($K_i$), and supertranslation $M_f$; rotation and boost invariance are shown in the appendix.

Dilatation: The transformations of the gauge fields ($a^a_t, a^a_i$) and the Lagrange multiplier ($\xi^a_i$) under the dilatation operator $D$ are
$$\delta_D a^a_t = (t\partial_t + x^k\partial_k + \Delta_1)\,a^a_t,\qquad \delta_D a^a_i = (t\partial_t + x^k\partial_k + \Delta_2)\,a^a_i, \qquad (4.61a)$$
$$\delta_D \xi^a_i = (t\partial_t + x^k\partial_k + \Delta_\xi)\,\xi^a_i. \qquad (4.61b)$$
Using these transformations in (4.58), the action changes as
$$\delta_D\mathcal{L} = \partial_t\big(t\,E^a_i E^a_i\big) + \partial_k\big(x_k\,E^a_i E^a_i\big) + \partial_t\Big(-\frac{1}{4}f^a_{ij}f^a_{ij}\Big) + \partial_k\Big(-\frac{1}{4}f^a_{ij}f^a_{ij}\Big) \qquad (4.62)$$
$$\text{if}\quad \Delta_1 = \Delta_2 = 1,\ \Delta_\xi = 2. \qquad (4.63)$$
So the action is dilatation invariant in four spacetime dimensions if the scaling dimensions of the temporal and spatial components of the gauge field are one and the scaling dimension of $\xi_i$ is two.

Spatial SCT: The transformations of the fields $a^a_t$, $a^a_i$ and $\xi^a_i$ under spatial SCT are
$$\delta_{K_l} a^a_t = \big(2x_l + 2x_l t\partial_t + 2x_k x_l\partial_k - x_k x_k\partial_l\big)\,a^a_t + 2tq\,a^a_l, \qquad (4.64a)$$
$$\delta_{K_l} a^a_i = \big(2x_l + 2x_l t\partial_t + 2x_k x_l\partial_k - x_k x_k\partial_l\big)\,a^a_i + 2\delta_{li}x_k a^a_k - 2\delta_{lk}x_i a^a_k + 2tq'\delta_{li}\,a^a_t, \qquad (4.64b)$$
$$\delta_{K_l} \xi^a_i = \big(4x_l + 2x_l t\partial_t + 2x_k x_l\partial_k - x_k x_k\partial_l\big)\,\xi^a_i + 2\delta_{li}x_k \xi^a_k - 2\delta_{lk}x_i \xi^a_k + 2tq''\delta_{li}\,a^a_t + 2q'''t\,f^a_{il}. \qquad (4.64c)$$
Using these transformations in (4.58), the action changes as
$$\delta_{K_l}\mathcal{L}^{(0)} = \partial_t\big(2x_l t\,\xi^a_i E^{a(0)}_i\big) - \partial_t\Big(\frac{1}{2}t x_l\,f^a_{ij}f^a_{ij}\Big) + \partial_k\big(2x_k x_l\,\xi^a_i E^a_i\big) - \partial_k\Big(\frac{1}{2}x_k x_l\,f^a_{ij}f^a_{ij}\Big) - \partial_l\big(x_k x_k\,\xi^a_i E^{a(0)}_i\big) + \partial_l\Big(\frac{1}{4}x_k x_k\,f^a_{ij}f^a_{ij}\Big), \qquad (4.65)$$
so the action is invariant under spatial special conformal transformations if $q = 0$, $q' = 1$, $q'' = 0$, $q''' = -1$.

Supertranslation: Finally, we check invariance under supertranslations ($M_f$), which contain the Hamiltonian, the temporal conformal transformation, and the boost operator for different choices of $f$. Under this operator the fields transform as
$$\delta_{M_f} a^a_t = f(x)\,\partial_t a^a_t,\qquad \delta_{M_f} a^a_i = f(x)\,\partial_t a^a_i - a^a_t\,\partial_i f(x),\qquad \delta_{M_f}\xi^a_i = f(x)\,\partial_t\xi^a_i - f^a_{ik}\,\partial_k f(x). \qquad (4.66)$$
Using these in (4.58), the action changes as
$$\delta_{M_f}\mathcal{L}^{(0)} = \partial_t\Big(f(x)\,\xi^a_i E^{a(0)}_i - \frac{1}{4}f^a_{ij}f^a_{ij}\Big) + \partial_i\Big(-\frac{1}{2}f^a_{ij}E^a_j\Big) + \partial_j\Big(\frac{1}{2}f^a_{ij}E^a_i\Big), \qquad (4.67)$$
so the action is invariant under $M_f$. We conclude that the NLO Lagrangian, i.e. the magnetic-sector action for the $\lambda = 0$ case, Eq. (4.58), is invariant under the full infinite CCA symmetry in four spacetime dimensions.

Energy-Momentum tensor

Like the electric sector, the NLO Lagrangian, or magnetic sector, is invariant under the infinite Carrollian conformal symmetry in 4d spacetime. Let us now work out the energy-momentum tensor for the action Eq. (4.58) and see how it can be improved. The energy-momentum tensor of the action (4.58) is
$$T^t_{\ i} = \xi^a_k\partial_i a^a_k,\qquad T^i_{\ t} = -\xi^a_i\partial_t a^a_t - f^a_{ik}\partial_t a^a_k, \qquad (4.68)$$
$$T^t_{\ t} = \xi^a_i\partial_t a^a_i - \mathcal{L},\qquad T^i_{\ j} = -\xi^a_i\partial_j a^a_t - f^a_{ik}\partial_j a^a_k - \delta^i_j\,\mathcal{L}. \qquad (4.69)$$
As in the electric sector, an improved energy-momentum tensor is needed. Following [42], the improved energy-momentum tensor for the magnetic sector is
$$T^\mu_{\ \nu} = -\frac{\delta\mathcal{L}}{\delta\partial_\mu a^{a}_\alpha}\,\partial_\nu a^{a}_\alpha + \delta^\mu_\nu\,\mathcal{L} - \delta^\mu_t\,\partial_i\big(\xi^a_i a^a_\nu\big) + \delta^\mu_i\Big(\partial_t\big(\xi^a_i a^a_\nu\big) + \partial_j\big(f^a_{ij} a^a_\nu\big)\Big). \qquad (4.70)$$
If we write it component-wise explicitly,
$$T^t_{\ i} = \xi^a_k f^a_{ik},\qquad T^i_{\ t} = 0,\qquad T^t_{\ t} = \frac{1}{4}f^a_{ij}f^a_{ij},\qquad T^i_{\ j} = -f^a_{ik}f^a_{jk} - \delta^i_j\,\mathcal{L}. \qquad (4.71)$$
Here we can see the energy-momentum tensor is gauge invariant, traceless, symmetric in spatial indices, and $T^i_{\ t} = 0$, as expected for a Carroll theory. Using the equations of motion and the Carrollian Bianchi identity, we can see
$$\partial_t T^t_{\ t} + \partial_i T^i_{\ t} = 0, \quad\text{using (4.59), (4.33a)}, \qquad (4.72a)$$
$$\partial_t T^t_{\ j} + \partial_i T^i_{\ j} = 0, \quad\text{using (4.59), (4.33b)}, \qquad (4.72b)$$
so the energy-momentum tensor satisfies the conservation equations.

5 Noether charges and Charge algebra

All four sectors of Carrollian Yang-Mills theory are invariant under the infinite Carrollian conformal symmetry. In this section we study the Noether charges and the charge algebra, and see if there is any central extension for any commutation relation. If we vary the Lagrangian ($L = \int d^{d-1}x\,\mathcal{L}$) on-shell on the field space in an arbitrary direction, $\phi \to \phi + \delta\phi$, we have
$$\delta L = \int d^{d-1}x\,\partial_t\Theta(\phi, \partial\phi, \delta\phi) \quad : \text{on-shell}. \qquad (5.1)$$
Here the expression for $\Theta$ in the four sectors is
$$\delta = 0:\quad \text{Electric sector } \Theta = \delta a^a_i\,E^{a(0)}_i,\qquad \text{Magnetic sector } \Theta = \delta a^a_i\,\xi^a_i, \qquad (5.2)$$
$$\delta = 1:\quad \text{Electric sector } \Theta = \delta a^a_i\,\tilde{E}^{a(0)}_i,\qquad \text{Magnetic sector } \Theta = \delta a^a_i\,\xi^a_i. \qquad (5.3)$$
Next, we consider a specific infinitesimal transformation $\phi \to \phi + \delta_\epsilon\phi$ off-shell. The variation $\delta_\epsilon$ is said to be a symmetry if
$$\delta_\epsilon L = \int d^{d-1}x\,\partial_t\beta(\phi, \partial\phi, \delta_\epsilon\phi) \quad : \text{off-shell}, \qquad (5.4)$$
for some function $\beta$ in field space. Comparing (5.1) and (5.4), we deduce that on-shell
$$\partial_t Q_\epsilon := \int d^{d-1}x\,\partial_t\big(\Theta(\Phi, \partial\Phi, \delta_\epsilon\Phi) - \beta(\Phi, \partial\Phi, \delta_\epsilon\Phi)\big) = 0. \qquad (5.5)$$
The Noether charge is therefore $Q = \int d^{d-1}x\,(\Theta - \beta)$. The charges for the different cases are listed below.

Electric ($\delta = 0$):
$$Q_{\rm Boost} = \int d^3x\,\Big(x_k\,\partial_t a^{a(0)}_i E^{a(0)}_i + a^{a(0)}_t E^{a(0)}_k - \frac{x_k}{2}E^{a(0)}_i E^{a(0)}_i\Big), \qquad (5.6a)$$
$$Q_{\rm Dilatation} = \int d^3x\,\Big(t\,\partial_t a^{a(0)}_i E^{a(0)}_i + x_k\partial_k a^{a(0)}_i E^{a(0)}_i + a^{a(0)}_i E^{a(0)}_i - t\,E^{a(0)}_i E^{a(0)}_i\Big), \qquad (5.6b)$$
$$Q_{\rm Spatial\ SCT} = \int d^3x\,\Big(2x_k t\,\partial_t a^{a(0)}_i E^{a(0)}_i + \big(2x_l x_k\partial_l a^{a(0)}_i - x_l x_l\partial_k a^{a(0)}_i\big)E^{a(0)}_i + 2x_k a^{a(0)}_i E^{a(0)}_i + 2x_l a^{a(0)}_l E^{a(0)}_k - 2x_i a^{a(0)}_k E^{a(0)}_i - t x_k\,E^{a(0)}_i E^{a(0)}_i\Big), \qquad (5.6c)$$
$$Q_{M_f} = \int d^3x\,\Big(f(x)\,\partial_t a^{a(0)}_i E^{a(0)}_i - a^{a(0)}_t\partial_i f(x)\,E^{a(0)}_i - f(x)\,E^{a(0)}_i E^{a(0)}_i\Big). \qquad (5.6d)$$
Magnetic ($\delta = 0$):
$$Q_{\rm Boost} = \int d^3x\,\Big(x_k\,\partial_t a^a_i\,\xi^a_i - a^a_t\,\xi^a_k - x_k\,\xi^a_i E^a_i + \frac{x_k}{4}f^a_{ij}f^a_{ij}\Big), \qquad (5.7a)$$
$$Q_{\rm Dilatation} = \int d^3x\,\Big(t\,\partial_t a^a_i\,\xi^a_i + x_k\partial_k a^a_i\,\xi^a_i + a^a_i\xi^a_i - t\,\xi^a_i E^a_i + \frac{t}{4}f^a_{ij}f^a_{ij}\Big), \qquad (5.7b)$$
$$Q_{\rm Spatial\ SCT} = \int d^3x\,\Big(2x_k t\,\partial_t a^a_i\,\xi^a_i + 2x_l x_k\partial_l a^a_i\,\xi^a_i - x_l x_l\partial_k a^a_i\,\xi^{a(0)}_i + 2x_k a^a_i\xi^a_i + 2x_l a^a_l\xi^{a(0)}_k - 2x_i a^a_k\xi^a_i + 2t\,\xi^a_k a^a_t - 2t x_k\,E^a_i\xi^a_i - \frac{1}{2}t x_k\,f^a_{ij}f^a_{ij}\Big), \qquad (5.7c)$$
$$Q_{M_f} = \int d^3x\,\Big(f(x)\,\partial_t a^{a(0)}_i\,\xi^a_i + a^{a(0)}_t\partial_i f(x)\,\xi^a_i - f(x)\,\xi^a_i E^{a(0)}_i - \frac{1}{4}f^a_{ij}f^a_{ij}\Big). \qquad (5.7d)$$
Similarly, for the electric and magnetic sectors of the $\delta = 1$ case, the Noether charges can be written down using Eq. (5.3). Their expressions are analogous to those of the $\delta = 0$ case, with $E^a_i$ and $f^a_{ij}$ replaced by $\tilde{E}^a_i$ and $\tilde{f}^a_{ij}$, respectively.

Checking the charge algebra with these charges, we find no central extension in any commutator. (In the Galilean Yang-Mills case, by contrast, there is a non-trivial state-dependent central charge in the charge algebra [67].) Here we give just one example for the electric sector; all other commutation relations can be realized similarly. Using the expression for $\Theta$, we can define the Poisson bracket for the electric sector as
$$\Omega(\delta_1, \delta_2) = \delta_1\Theta(\delta_2) - \delta_2\Theta(\delta_1) = \delta_1 a^{a(0)}_i\,\delta_2 E^{a(0)}_i - \delta_2 a^{a(0)}_i\,\delta_1 E^{a(0)}_i. \qquad (5.8)$$
Checking the algebra between dilatation ($D$) and supertranslation ($M_f$) using the transformations of the fields in the electric sector, we find
$$\Omega(D, M_f) = Q_{M_h},\qquad \text{where } h = x^k\partial_k f(x) - f(x). \qquad (5.9)$$
Here we can see that the commutation relation between dilatation and supertranslation is satisfied and that there is no central charge. Similarly, all the other commutation relations of the infinite CCA can be realized for all four sectors. A detailed discussion of the charge algebra is given in Appendix B.

6 Propagator and Vertices

We have discussed the construction of the Carrollian Yang-Mills actions, the symmetries of all four sectors, the energy-momentum tensor and its conservation, and the Noether charges and charge algebra. We now turn to the quantum aspects of the theory. A detailed discussion of the quantum properties will appear in our subsequent work; here we give the propagators and vertices for all four sectors. We discuss the $\delta = 1$ sectors first: in this case the Lagrangians contain only kinetic terms, so there are only propagators. After that we discuss the propagators, vertices, and Feynman diagrams for the $\delta = 0$ cases.

6.1 Electric Sector I

For the $\delta = 1$ case, as in the relativistic theory, we cannot compute the propagator without adding a gauge-fixing term. The full electric-sector ($\delta = 1$) Lagrangian with a gauge-fixing term is
$$\mathcal{L} = \frac{1}{2}\big(\partial_t a^{a(0)}_i - \partial_i a^{a(0)}_t\big)\big(\partial_t a^{a(0)}_i - \partial_i a^{a(0)}_t\big) - \frac{1}{2\chi}\partial_t a^a_t\,\partial_t a^a_t, \qquad (6.1)$$
where $\chi$ is the gauge parameter.
To obtain the propagators from this kinetic Lagrangian, let us first introduce the Fourier transformation to momentum space,
$$\Phi^a(t, \vec{x}) = \int \frac{d\omega}{2\pi}\frac{d^3\vec{k}}{(2\pi)^3}\,e^{-i\omega t}e^{i\vec{k}\cdot\vec{x}}\,\tilde{\Phi}^a(\omega, \vec{k}), \qquad (6.2)$$
where $\Phi^a = (a^a_t, a^a_i, c^a, \bar{c}^a, \xi^a_i)$, together with the delta functions
$$\int \frac{dt}{2\pi}\,e^{-i\omega t} = \delta(\omega),\qquad \int \frac{d^3\vec{x}}{(2\pi)^3}\,e^{i\vec{k}\cdot\vec{x}} = \delta^{(3)}(\vec{k}). \qquad (6.3)$$
We also introduce the notation $k = (\omega, \vec{k})$ and $A^a_I = (a^a_t, a^a_i)$. Taking the Fourier transformation and using the delta functions, the action becomes
$$S = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\,\frac{1}{2}A^a_I(k)\,d^{IJab}\,A^b_J(-k), \qquad (6.4)$$
where
$$d^{IJab}(k) = i\delta^{ab}\begin{pmatrix} -k^2 + \dfrac{\omega^2}{\chi} & -\omega k_j \\ -\omega k_i & -\omega^2\delta_{ij} \end{pmatrix}. \qquad (6.5)$$
Then, from the inverse of $d^{IJab}$, we get the propagators for the fields $A^a_I$,
$$\langle A^a_I A^b_J\rangle = -i\delta^{ab}\begin{pmatrix} \dfrac{\chi}{\omega^2} & -\dfrac{k_i\chi}{\omega^3} \\ -\dfrac{k_j\chi}{\omega^3} & \dfrac{k_i k_j\chi - \omega^2\delta_{ij}}{\omega^4} \end{pmatrix}, \qquad (6.6)$$
where $A^a_I = a^a_t, a^a_i$.

[Figure 1: Electric propagator ($\delta = 1$). (a) Gauge field propagator $\langle A^a_I A^b_J\rangle$.]
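The inversion can be cross-checked symbolically. The SymPy sketch below is ours (not from the paper); it strips the colour factor $\delta^{ab}$ and the overall factors of $\pm i$, writes $d^{IJab} = i\delta^{ab}M$ and $\langle A_I A_J\rangle = -i\delta^{ab}P$, verifies $MP = 1$ for the matrices in (6.5)-(6.6), and also shows that dropping the gauge-fixing piece $\omega^2/\chi$ makes the kinetic matrix degenerate, which is why gauge fixing is needed.

import sympy as sp

w, chi = sp.symbols('omega chi', nonzero=True)
k1, k2, k3 = sp.symbols('k1 k2 k3', real=True)
k = [k1, k2, k3]
ksq = k1**2 + k2**2 + k3**2

def delta(i, j):
    return 1 if i == j else 0

# Field ordering A_I = (a_t, a_1, a_2, a_3)
M = sp.zeros(4, 4)   # kinetic matrix of Eq. (6.5), without i*delta^{ab}
P = sp.zeros(4, 4)   # claimed propagator of Eq. (6.6), without -i*delta^{ab}
M[0, 0] = -ksq + w**2 / chi
P[0, 0] = chi / w**2
for i in range(3):
    M[0, i + 1] = M[i + 1, 0] = -w * k[i]
    P[0, i + 1] = P[i + 1, 0] = -k[i] * chi / w**3
    for j in range(3):
        M[i + 1, j + 1] = -w**2 * delta(i, j)
        P[i + 1, j + 1] = (k[i] * k[j] * chi - w**2 * delta(i, j)) / w**4

# (6.6) is indeed the inverse of (6.5)
assert (M * P - sp.eye(4)).applyfunc(sp.simplify) == sp.zeros(4, 4)

# without the omega^2/chi gauge-fixing piece the kinetic matrix is degenerate
M0 = M.copy()
M0[0, 0] = -ksq
assert sp.simplify(M0.det()) == 0

print("propagator check passed")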
If we write the above action in momentum space using Eq. (6.2) and Eq. (6.3), we have
\[
S = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \Big[\frac{1}{2}\, A^a_I(k)\, d^{IJab}\, A^b_J(-k)\Big], \tag{6.8}
\]
where
\[
d^{IJab}(k) = i\delta^{ab}
\begin{pmatrix}
\big(0\big)_{3\times 3} & \big(i k_i\big)_{3\times 1} & -\big(i\omega\delta_{ij}\big)_{3\times 3} \\
\big(i k_i\big)_{1\times 3} & 0_{1\times 1} & \big(0\big)_{1\times 3} \\
-\big(i\omega\delta_{ij}\big)_{3\times 3} & \big(0\big)_{3\times 1} & \big(k^2\delta_{ij} - k_i k_j\big)_{3\times 3}
\end{pmatrix}. \tag{6.9}
\]
The determinant of this matrix is zero, so we cannot derive a propagator by inverting it. We need to add a gauge fixing term, so the full Lagrangian with the gauge fixing term is
\[
\mathcal{L} = \xi^a_i\big(\partial_t a^{a(0)}_i - \partial_i a^{a(0)}_t\big) - \frac{1}{4}\big(\partial_i a^a_j - \partial_j a^a_i\big)\big(\partial_i a^a_j - \partial_j a^a_i\big) - \frac{1}{2\chi}\,\partial_i a^a_i\,\partial_j a^a_j. \tag{6.10}
\]
Similar to equation (6.8), when we write this gauge-fixed Lagrangian in momentum space the matrix $d^{IJab}(k)$ now becomes
\[
d^{IJab}(k) = \delta^{ab}
\begin{pmatrix}
\big(0\big)_{3\times 3} & \big(i k_i\big)_{3\times 1} & -\big(i\omega\delta_{ij}\big)_{3\times 3} \\
\big(i k_i\big)_{1\times 3} & 0_{1\times 1} & \big(0\big)_{1\times 3} \\
-\big(i\omega\delta_{ij}\big)_{3\times 3} & \big(0\big)_{3\times 1} & \big(k^2\delta_{ij} - (1-\tfrac{1}{\chi})\, k_i k_j\big)_{3\times 3}
\end{pmatrix}. \tag{6.11}
\]
The determinant of this matrix is non-zero. The propagator, obtained by inverting the above matrix, is
\[
\langle A^a_I A^b_J \rangle = \delta^{ab}
\begin{pmatrix}
\Big(\dfrac{k^2\delta_{ij} - k_i k_j}{\omega^2}\Big)_{3\times 3} & \Big(\dfrac{-i k_j}{k^2}\Big)_{3\times 1} & i\Big(\dfrac{k^2\delta_{ij} - k_i k_j}{\omega k^2}\Big)_{3\times 3} \\[6pt]
\Big(\dfrac{-i k_i}{k^2}\Big)_{1\times 3} & \dfrac{\omega^2\chi}{k^4} & \Big(\dfrac{k_j \omega\chi}{k^4}\Big)_{1\times 3} \\[6pt]
i\Big(\dfrac{k^2\delta_{ij} - k_i k_j}{\omega k^2}\Big)_{3\times 3} & \Big(\dfrac{k_j \omega\chi}{k^4}\Big)_{3\times 1} & \Big(\dfrac{-k_i k_j \chi}{k^4}\Big)_{3\times 3}
\end{pmatrix}, \tag{6.12}
\]
where $A^a_I = \xi^a_i, a^a_t, a^a_i$.
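The two determinant statements can be checked directly. The following sympy sketch is ours, not from the paper; it builds the $7\times 7$ quadratic forms of (6.9) and (6.11) in the ordering $A_I = (\xi_i, a_t, a_i)$, dropping the overall $i\delta^{ab}$ factor since it does not affect invertibility.

```python
import sympy as sp

w, chi, k1, k2, k3 = sp.symbols('omega chi k_1 k_2 k_3', real=True)
k = sp.Matrix([k1, k2, k3])
ksq = (k.T * k)[0]
I3 = sp.eye(3)

def quad_form(aa_block):
    # ordering A_I = (xi_i, a_t, a_i), as in Eqs. (6.9) and (6.11)
    return sp.BlockMatrix([
        [sp.zeros(3, 3), sp.I * k,        -sp.I * w * I3],
        [sp.I * k.T,     sp.zeros(1, 1),   sp.zeros(1, 3)],
        [-sp.I * w * I3, sp.zeros(3, 1),   aa_block],
    ]).as_explicit()

d_no_gf = quad_form(ksq * I3 - k * k.T)                  # Eq. (6.9), no gauge fixing
d_gf    = quad_form(ksq * I3 - (1 - 1/chi) * k * k.T)    # Eq. (6.11), gauge fixed

print(sp.simplify(d_no_gf.det()))   # 0: not invertible without gauge fixing
print(sp.factor(d_gf.det()))        # nonzero for generic omega and k
```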
This section considered the Lagrangians of the $\delta = 1$ case for the electric and magnetic sectors. These Lagrangians only contain kinetic terms, so there are only propagators, not vertices.

Figure 2: Magnetic Propagator ($\delta = 1$). (a) Gauge field propagator $\langle A^a_I A^b_J\rangle$.

6.3 Electric sector II

We now focus on the $\delta = 0$ cases for the propagators and vertices, which contain non-abelian or self-interaction terms. The full Lagrangian for the $\delta = 0$ electric sector with gauge fixing term and ghost term is
\[
\mathcal{L} = \frac{1}{2}\, E^{a(0)}_i E^{a(0)}_i - \frac{1}{2\chi}\,\partial_t a^a_t\,\partial_t a^a_t + \partial_t \bar{c}^a\, D_t c^a. \tag{6.13}
\]
The kinetic part of the above Lagrangian is
\[
\mathcal{L}_{\rm kin} = \frac{1}{2}\big(\partial_t a^{a(0)}_i - \partial_i a^{a(0)}_t\big)\big(\partial_t a^{a(0)}_i - \partial_i a^{a(0)}_t\big) - \frac{1}{2\chi}\,\partial_t a^a_t\,\partial_t a^a_t + \partial_t \bar{c}^a\, \partial_t c^a. \tag{6.14}
\]
In momentum space, using equations (6.2) and (6.3), we can write
\[
S_{\rm kin} = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \Big[\frac{1}{2}\, A^a_I(k)\, d^{IJab}\, A^b_J(-k) + \bar{c}^a(k)\big(-\omega^2\big)\, c^a(-k)\Big]. \tag{6.15}
\]
The kinetic part of the gauge fields and the gauge fixing term are the same as in Eq. (6.1), so the expression of $d^{IJab}$ is the same as (6.5). The inverse of this matrix is the propagator for the gauge fields given in equation (6.6).
The inverse of the coefficient of $\bar{c}c$ in (6.15) gives the propagator for the ghost fields as
\[
\langle \bar{c}^a(k)\, c^b(-k)\rangle = \frac{i\delta^{ab}}{\omega^2}. \tag{6.16}
\]
In compact form, the propagators and Feynman diagrams of the Lagrangian are
\[
\langle A^a_I A^b_J\rangle \;\equiv\; -i\delta^{ab}
\begin{pmatrix}
\dfrac{\chi}{\omega^2} & -\dfrac{k_i \chi}{\omega^3} \\[4pt]
-\dfrac{k_j \chi}{\omega^3} & \dfrac{k_i k_j \chi - \omega^2 \delta_{ij}}{\omega^4}
\end{pmatrix},
\qquad
\langle \bar{c}^a(k)\, c^b(-k)\rangle = \frac{i\delta^{ab}}{\omega^2}. \tag{6.17}
\]
The interaction terms of the Lagrangian (Eq. (6.13)) are
\[
\mathcal{L}_{\rm int} = 2g f^{abc} a^b_t a^c_i\, \partial_t a^a_i - 2g f^{abc} a^b_t a^c_i\, \partial_i a^a_t + g^2 f^{abc} f^{ade} a^b_t a^c_i a^d_t a^e_i - g f^{abc} a^a_t\, \partial_t\bar{c}^c\, c^b. \tag{6.18}
\]
Transforming to momentum space with (6.2) and using the delta functions (6.3), we can write the three-field interaction terms as
\[
S^{(3)}_{\rm int} = \int \frac{1}{(2\pi)^{12}} \prod_{i=1}^{3} d\omega_i\, d^3\vec{k}_i\, (2\pi)^4\, \delta(\omega_1+\omega_2+\omega_3)\,\delta^{(3)}(\vec{k}_1+\vec{k}_2+\vec{k}_3)\, g f^{abc}
\]
\[
\times \Big[(\omega_1-\omega_2)\, a^b_t(k_1)\, a^c_i(k_2)\, a^a_i(k_3) + i\delta_{ij}(k_1-k_2)_i\, a^b_t(k_1)\, a^c_t(k_3)\, a^a_j(k_2) - i\omega_2\, a^a_t(k_1)\, \bar{c}^b(k_2)\, c^c(k_3)\Big], \tag{6.19}
\]
where $\prod_{i=1}^{n} d\omega_i\, d^3\vec{k}_i = d\omega_1\, d^3\vec{k}_1 \cdots d\omega_n\, d^3\vec{k}_n$. From this expression we can write the 3-point vertices as
\[
V^{abc}_{3\, a_t a_i a_j} = -g f^{abc}\,(\omega_1-\omega_2), \qquad
V^{abc\, i}_{3\, a_t a_t a_i} = -g f^{abc}\,(k_1-k_2)_i, \qquad
V^{abc}_{3\, a_t \bar{c} c} = g f^{abc}\,\omega_2. \tag{6.20}
\]
Similarly, transforming the four-field interaction terms of $S_{\rm int}$ to momentum space, we get
\[
S^{(4)}_{\rm int} = \int \frac{1}{(2\pi)^{16}} \prod_{i=1}^{4} d\omega_i\, d^3\vec{k}_i\, (2\pi)^4\, \delta(\omega_1+\omega_2+\omega_3+\omega_4)\,\delta^{(3)}(\vec{k}_1+\vec{k}_2+\vec{k}_3+\vec{k}_4)
\times g^2 \Big[ f^{abc} f^{ade}\, a^b_t(k_1)\, a^c_i(k_2)\, a^d_t(k_3)\, a^e_i(k_4)\Big], \tag{6.21}
\]
from which we can read off the 4-point vertex
\[
V^{bcde}_{4\, a_t a_i a_t a_j} = -2i g^2\, \delta_{ij}\,\big(f^{abc} f^{ade} + f^{abe} f^{adc}\big). \tag{6.22}
\]
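The color and index structure of (6.22), i.e. the factor $2\,\delta_{ij}\,(f^{abc}f^{ade}+f^{abe}f^{adc})$, simply counts the ways of attaching the four external legs to the quartic term in (6.21). A small sympy sketch of this counting (ours; SU(2) is used for concreteness, so $f^{abc}=\epsilon^{abc}$, and the fields are treated as ordinary variables):

```python
import sympy as sp

eps = sp.LeviCivita                         # SU(2): f^{abc} = epsilon^{abc}
at = [sp.Symbol(f'at_{b}') for b in range(3)]
ai = [[sp.Symbol(f'ai_{b}_{m}') for m in range(3)] for b in range(3)]

# quartic term of (6.21): f^{abc} f^{ade} a_t^b a_i^c a_t^d a_i^e (spatial index contracted)
quartic = sum(eps(a, b, c) * eps(a, d, e) * at[b] * ai[c][m] * at[d] * ai[e][m]
              for a in range(3) for b in range(3) for c in range(3)
              for d in range(3) for e in range(3) for m in range(3))

# reading off the vertex = one derivative per external leg; the claim behind (6.22)
# is that this always equals 2 * delta_ij * (f f + f f)
ok = all(
    sp.diff(quartic, at[b], ai[c][i], at[d], ai[e][j])
    == 2 * sp.KroneckerDelta(i, j) * sum(eps(a, b, c) * eps(a, d, e)
                                         + eps(a, b, e) * eps(a, d, c) for a in range(3))
    for b in range(3) for c in range(3) for d in range(3) for e in range(3)
    for i in range(3) for j in range(3)
)
print(ok)                                   # True
```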
6.4 Magnetic sector II

The full magnetic sector Lagrangian for the $\delta = 0$ case with gauge fixing term and ghost term is
\[
\mathcal{L} = \xi^a_i E^a_i - \frac{1}{4}\, f^a_{ij} f^a_{ij} - \frac{1}{2\chi}\,\partial_i a^a_i\,\partial_j a^a_j - \partial_i\bar{c}^a\, D_i c^a. \tag{6.23}
\]
The kinetic part of the Lagrangian is
\[
\mathcal{L}_{\rm kin} = \xi^a_i\big(\partial_t a^{a(0)}_i - \partial_i a^{a(0)}_t\big) - \frac{1}{4}\big(\partial_i a^a_j - \partial_j a^a_i\big)\big(\partial_i a^a_j - \partial_j a^a_i\big) - \frac{1}{2\chi}\,\partial_i a^a_i\,\partial_j a^a_j - \partial_i\bar{c}^a\,\partial_i c^a. \tag{6.24}
\]
Using equations (6.2) and (6.3), we can write the above Lagrangian in momentum space as
\[
S_{\rm kin} = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \Big[\frac{1}{2}\, A^a_I(k)\, d^{IJab}\, A^b_J(-k) + \bar{c}^a(k)\big(-\vec{k}^2\big)\, c^a(-k)\Big], \tag{6.25}
\]
where $A^a_I = \xi^a_i, a^a_t, a^a_i$. The kinetic terms of the gauge fields of this magnetic sector are the same as in Eq. (6.10), so the matrix $d^{IJab}$ is the same as in Eq. (6.11), and so is the propagator, Eq. (6.12). The inverse of the coefficient of $\bar{c}c$ gives the propagator for the ghost fields as
\[
\langle \bar{c}^a(k)\, c^b(-k)\rangle = \frac{i\delta^{ab}}{\vec{k}^2}. \tag{6.26}
\]

Figure 3: Electric Sector Feynman Diagrams. (a) $V^{abc}_{3\, a_t a_i a_j}$, (b) $V^{abc\,i}_{3\, a_t a_t a_i}$, (c) $V^{bcde}_{4\, a_t a_i a_t a_j}$, (d) $V^{abc}_{3\, a_t \bar{c} c}$.

In compact form, the propagators and Feynman diagrams of the Lagrangian are
\[
\langle A^a_I A^b_J\rangle \;\equiv\; \delta^{ab}
\begin{pmatrix}
\Big(\dfrac{k^2\delta_{ij} - k_i k_j}{\omega^2}\Big)_{3\times 3} & \Big(\dfrac{-i k_j}{k^2}\Big)_{3\times 1} & i\Big(\dfrac{k^2\delta_{ij} - k_i k_j}{\omega k^2}\Big)_{3\times 3} \\[6pt]
\Big(\dfrac{-i k_i}{k^2}\Big)_{1\times 3} & \dfrac{\omega^2\chi}{k^4} & \Big(\dfrac{k_j \omega\chi}{k^4}\Big)_{1\times 3} \\[6pt]
i\Big(\dfrac{k^2\delta_{ij} - k_i k_j}{\omega k^2}\Big)_{3\times 3} & \Big(\dfrac{k_j \omega\chi}{k^4}\Big)_{3\times 1} & \Big(\dfrac{-k_i k_j \chi}{k^4}\Big)_{3\times 3}
\end{pmatrix},
\qquad
\langle \bar{c}^a(k)\, c^b(-k)\rangle = \frac{i\delta^{ab}}{\vec{k}^2}. \tag{6.27}
\]
The interaction terms of the Lagrangian are
\[
\mathcal{L}_{\rm int} = g f^{abc} a^b_t a^c_i\, \xi^a_i - g f^{abc} a^b_i a^c_j\, \partial_i a^a_j - \frac{1}{4}\, g^2 f^{abc} f^{ade}\, a^b_i a^c_j a^d_i a^e_j - g f^{abc} a^b_i\, \partial_i\bar{c}^a\, c^c. \tag{6.28}
\]
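As a consistency check, the interaction terms (6.28) follow from expanding $\xi^a_i E^a_i - \frac{1}{4} f^a_{ij}f^a_{ij} - \partial_i\bar{c}^a D_i c^a$ around the kinetic part (6.24). The following sympy sketch of this expansion is ours: it uses SU(2) with $f^{abc}=\epsilon^{abc}$, treats the ghosts as commuting placeholders (only the index structure is being checked), and omits the purely quadratic gauge-fixing term from both sides.

```python
import sympy as sp
from sympy import LeviCivita as eps          # SU(2): f^{abc} = epsilon^{abc}

t, x, y, z = sp.symbols('t x y z')
X = (x, y, z)
g = sp.Symbol('g')
R = range(3)

def field(name, b, i=None):
    label = name + str(b) + ('' if i is None else '_' + str(i))
    return sp.Function(label)(t, *X)

at = [field('at', b) for b in R]                    # a_t^a
a  = [[field('a', b, i) for i in R] for b in R]     # a_i^a
xi = [[field('xi', b, i) for i in R] for b in R]    # xi_i^a
c  = [field('c', b) for b in R]                     # ghost (treated as commuting)
cb = [field('cb', b) for b in R]                    # antighost

def E(b, i):      # E^a_i = dt a^a_i - di a^a_t + g f^{abc} a^b_t a^c_i
    return (sp.diff(a[b][i], t) - sp.diff(at[b], X[i])
            + g * sum(eps(b, m, n) * at[m] * a[n][i] for m in R for n in R))

def f(b, i, j):   # f^a_{ij} = di a^a_j - dj a^a_i + g f^{abc} a^b_i a^c_j
    return (sp.diff(a[b][j], X[i]) - sp.diff(a[b][i], X[j])
            + g * sum(eps(b, m, n) * a[m][i] * a[n][j] for m in R for n in R))

def Dc(b, i):     # D_i c^a
    return sp.diff(c[b], X[i]) + g * sum(eps(b, m, n) * a[m][i] * c[n] for m in R for n in R)

# Lagrangian (6.23) without the gauge-fixing term
L = (sum(xi[b][i] * E(b, i) for b in R for i in R)
     - sp.Rational(1, 4) * sum(f(b, i, j)**2 for b in R for i in R for j in R)
     - sum(sp.diff(cb[b], X[i]) * Dc(b, i) for b in R for i in R))

# kinetic part (6.24), again without the gauge-fixing term
Lkin = (sum(xi[b][i] * (sp.diff(a[b][i], t) - sp.diff(at[b], X[i])) for b in R for i in R)
        - sp.Rational(1, 4) * sum((sp.diff(a[b][j], X[i]) - sp.diff(a[b][i], X[j]))**2
                                  for b in R for i in R for j in R)
        - sum(sp.diff(cb[b], X[i]) * sp.diff(c[b], X[i]) for b in R for i in R))

# interaction terms (6.28)
Lint = (g * sum(eps(b, m, n) * at[m] * a[n][i] * xi[b][i] for b in R for m in R for n in R for i in R)
        - g * sum(eps(b, m, n) * a[m][i] * a[n][j] * sp.diff(a[b][j], X[i])
                  for b in R for m in R for n in R for i in R for j in R)
        - sp.Rational(1, 4) * g**2 * sum(eps(b, m, n) * eps(b, p, q) * a[m][i] * a[n][j] * a[p][i] * a[q][j]
                                         for b in R for m in R for n in R for p in R for q in R
                                         for i in R for j in R)
        - g * sum(eps(b, m, n) * a[m][i] * sp.diff(cb[b], X[i]) * c[n]
                  for b in R for m in R for n in R for i in R))

print(sp.expand(L - Lkin - Lint))            # 0
```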
All the three-point interactions in momentum space, using (6.2) and (6.3), are
\[
S^{(3)}_{\rm int} = \int \frac{1}{(2\pi)^{12}} \prod_{i=1}^{3} d\omega_i\, d^3\vec{k}_i\, (2\pi)^4\, \delta(\omega_1+\omega_2+\omega_3)\,\delta^{(3)}(\vec{k}_1+\vec{k}_2+\vec{k}_3)\, g f^{abc}
\]
\[
\times \Big[ i\delta_{ij}\,\xi^a_i(k_1)\, a^b_t(k_2)\, a^c_j(k_3)
+ \frac{i}{6}\big[(k_1-k_2)_i\delta_{il}\delta_{jk} + (k_2-k_3)_i\delta_{ij}\delta_{lk} + (k_3-k_1)_i\delta_{ik}\delta_{jl}\big]\, a^a_j(k_1)\, a^b_k(k_2)\, a^c_l(k_3)
+ i\delta_{ij}\, (k_2)_j\, a^a_i(k_1)\,\bar{c}^b(k_2)\, c^c(k_3)\Big], \tag{6.29}
\]
from which we can write the 3-point vertices as
\[
V^{abc}_{3\, \xi_i a_t a_i} = -g f^{abc}, \qquad
V^{abc}_{3\, a_i \bar{c} c} = -g f^{abc}\, (k_2)_i,
\]
\[
V^{abc\, ijk}_{3\, a_i a_j a_k} = -g f^{abc}\big[(k_1-k_2)_k\,\delta_{ij} + (k_2-k_3)_i\,\delta_{jk} + (k_3-k_1)_j\,\delta_{ik}\big]. \tag{6.30}
\]
The four-point interaction terms in momentum space are
\[
S^{(4)}_{\rm int} = \int \frac{1}{(2\pi)^{16}} \prod_{i=1}^{4} d\omega_i\, d^3\vec{k}_i\, (2\pi)^4\, \delta(\omega_1+\omega_2+\omega_3+\omega_4)\,\delta^{(3)}(\vec{k}_1+\vec{k}_2+\vec{k}_3+\vec{k}_4)
\]
\[
\times g^2 \Big[-\frac{1}{24}\big(f^{abc} f^{ade}(\delta_{ik}\delta_{jl}-\delta_{il}\delta_{jk}) + f^{abd} f^{ace}(\delta_{ij}\delta_{kl}-\delta_{il}\delta_{jk}) + f^{abe} f^{acd}(\delta_{ij}\delta_{kl}-\delta_{ik}\delta_{jl})\big)\, a^b_i(k_1)\, a^c_j(k_2)\, a^d_k(k_3)\, a^e_l(k_4)\Big], \tag{6.31}
\]
from which we can read off the 4-point vertices as
\[
V^{bcde\, ijkl}_{4\, a_i a_j a_k a_l} = -i g^2\big[f^{abc} f^{ade}(\delta_{ik}\delta_{jl}-\delta_{il}\delta_{jk}) + f^{abd} f^{ace}(\delta_{ij}\delta_{kl}-\delta_{il}\delta_{jk}) + f^{abe} f^{acd}(\delta_{ij}\delta_{kl}-\delta_{ik}\delta_{jl})\big]. \tag{6.32}
\]

Figure 4: Magnetic Sector Feynman Diagrams. (a) $V^{abc}_{3\, \xi_i a_t a_j}$, (b) $V^{abc\,ijk}_{3\, a_i a_j a_k}$, (c) $V^{bcde\,ijkl}_{4\, a_i a_j a_k a_l}$, (d) $V^{abc}_{3\, a_i \bar{c} c}$.

We will study the quantum properties of the non-trivial sectors of CYM in detail in our subsequent work using the Feynman rules listed above. After that, we will add matter fields to Carrollian Yang-Mills and construct a QCD-like structure in the Carrollian theory.

6.5 Propagators in position space

In this section we look at the propagators of the gauge fields in position space. In momentum space, the electric and magnetic sector propagators are (6.6) and (6.12). All the correlation functions of the electric and magnetic sectors in position space are listed below.

Electric
\[
G^{ab}_{tt}(x-y) = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle a^a_t a^b_t\rangle\, e^{-i\omega t} e^{i k_i x_i} = \delta^{ab}\chi\, 2\pi t\, \delta^3(\vec{x}), \tag{6.33}
\]
\[
G^{ab}_{ij}(x-y) = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle a^a_i a^b_j\rangle\, e^{-i\omega t} e^{i k_i x_i} = \delta^{ab}\Big[\chi\, \frac{2}{3}\pi t^3\, \partial_i\partial_j\delta^3(\vec{x}) - \delta_{ij}\, 2\pi t\, \delta^3(\vec{x})\Big], \tag{6.34}
\]
\[
G^{ab}_{ti}(x-y) = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle a^a_t a^b_i\rangle\, e^{-i\omega t} e^{i k_i x_i} = \delta^{ab}\chi\, \pi t^2\, \partial_i\delta^3(\vec{x}). \tag{6.35}
\]
We can see that each propagator is of the form $\delta^3(\vec{x})$ times some function of time. This means there is no propagation in space, only propagation in time. This is the behaviour expected from electric versions of Carrollian theories and has been observed, e.g., in the theory of scalars and $U(1)$ gauge fields earlier.
Magnetic
\[
G^{ab}_{tt}(x-y) = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle a^a_t a^b_t\rangle\, e^{-i\omega t} e^{i k_i x_i} = -\delta^{ab}\,\partial_t\partial_t\delta(t)\, 4\pi r\, \Lambda, \tag{6.36}
\]
\[
G^{ab}_{ti}(x-y) = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle a^a_t a^b_i\rangle\, e^{-i\omega t} e^{i k_i x_i} = -\delta^{ab}\,\partial_t\delta(t)\, 4\pi \frac{x_i}{r}\, \Lambda, \tag{6.37}
\]
\[
G^{ab}_{ij}(x-y) = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle a^a_i a^b_j\rangle\, e^{-i\omega t} e^{i k_i x_i} = 4\pi\delta^{ab}\,\delta(t)\,\Big\{\frac{\delta_{ij}}{r} - \frac{x_i x_j}{r^3}\Big\}, \tag{6.38}
\]
\[
G^{ab}_{ti}(x-y)_\xi = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle \xi^a_i a^b_t\rangle\, e^{-i\omega t} e^{i k_i x_i} = -2\pi^2\delta^{ab}\,\delta(t)\,\frac{x_i}{r^3}, \tag{6.39}
\]
\[
G^{ab}_{ij}(x-y)_\xi = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle \xi^a_i a^b_j\rangle\, e^{-i\omega t} e^{i k_i x_i} = -\frac{\pi}{2}\,\delta_{ij}\,\delta^3(\vec{x}) - 2\pi^3\Big\{\frac{\delta_{ij}}{r^3} - \frac{3 x_i x_j}{r^5}\Big\}, \tag{6.40}
\]
\[
G^{ab}_{ij}(x-y)_{\xi\xi} = \int \frac{d\omega\, d^3\vec{k}}{(2\pi)^4}\, \langle \xi^a_i \xi^b_j\rangle\, e^{-i\omega t} e^{i k_i x_i} = \delta^{ab}\, 2\pi t\,\Big[\delta_{ij}\,\partial^2\delta^3(\vec{x}) - \partial_i\partial_j\delta^3(\vec{x})\Big]. \tag{6.41}
\]
In the first two propagators, $\Lambda = \int_0^\infty \frac{\sin\theta}{\theta^3}\, d\theta$, which is a divergent integral. To regularize it, we write
\[
\Lambda = \int_0^\infty \frac{\sin\theta}{\theta^3}\, d\theta = \int_0^\infty \frac{1}{\theta^2}\, d\theta - \frac{\pi}{4}
= \lim_{\epsilon\to 0}\Big[\int_\epsilon^\infty \frac{1}{\theta^2}\, d\theta - \frac{\pi}{4}\Big]
= \lim_{\epsilon\to 0}\Big[\frac{1}{\epsilon} - \frac{\pi}{4}\Big]. \tag{6.42}
\]
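The finite part $-\pi/4$ in (6.42) can be checked numerically, since $\int_\epsilon^\infty \sin\theta/\theta^3\, d\theta - 1/\epsilon \to \int_0^\infty (\sin\theta-\theta)/\theta^3\, d\theta$ as $\epsilon\to 0$. A rough numerical sketch (ours, accurate to a few decimal places):

```python
import numpy as np
from scipy.integrate import quad

# finite part of Lambda: lim_{eps -> 0} [ int_eps^inf sin(t)/t^3 dt - 1/eps ]
#                      = int_0^inf (sin t - t)/t^3 dt, which should equal -pi/4
main, _ = quad(lambda s: (np.sin(s) - s) / s**3, 1e-12, 60.0, limit=400)
tail = -1.0 / 60.0     # for s > 60 the integrand is -1/s^2 up to a term of order 1/s^3
print(main + tail, -np.pi / 4)   # both approximately -0.7854
```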
The position-space propagators contain delta functions in all of the above cases, but some delta functions are in time and some are in the spatial coordinates. This finding is somewhat surprising, as one does not expect to find a mixture of spatial and temporal delta functions in the magnetic sector, only temporal delta functions. This is perhaps a pointer that not all Carrollian magnetic theories would be reducible to lower dimensional Euclidean CFTs, as was shown in the scalar case in [76]. This point requires further investigation.

7 Conclusions and Discussions

In this paper, we have analyzed the Carrollian limit of Yang-Mills theory systematically and obtained electric and magnetic sectors, with one subsector of each of the electric and magnetic sectors having non-abelian or self-interaction terms, while the other subsector consists of copies of the Carrollian abelian theory. The Carrollian abelian theory found here is consistent with that discussed in [42]. This is a first action formulation for Carrollian Yang-Mills theory. We have obtained the Carrollian Yang-Mills actions by taking a small-$c$ expansion of the Poincaré-invariant Yang-Mills action, where we observed that different values of the parameter $\delta$, used in the small-$c$ expansion of the gauge fields, lead to different sectors of the Carrollian Yang-Mills theory. In particular, for $\delta = 0$ we get two non-trivial Carrollian Yang-Mills theories, and for any non-zero value of $\delta$ (which we have taken to be $\delta = 1$ for simplicity) we get copies of the Carrollian abelian theory. In 4 dimensions, all four sectors are found to be invariant under the infinite CCA. The energy-momentum tensors for all four sectors were calculated and found to be conserved using the equations of motion and the Bianchi identities. We have also calculated the Noether charges for all four sectors, and found that there are no central extensions in the algebra of the charges. This is unlike the Galilean Yang-Mills theories in [67], where there is a state-dependent central extension in the algebra of the charges. Finally, we listed all the Feynman rules needed to understand the quantum properties of Carrollian Yang-Mills theory, with a detailed analysis kept for future work. Further, we also calculated the propagators in position space, and from these we explicitly saw the ultra-local behavior of the Carrollian theory.

There are a number of immediate directions for future work. The construction of the different Carrollian Yang-Mills actions is our first step toward our goal of understanding the full quantum properties of Carrollian Yang-Mills theory.
In our subsequent work, we will study the quantum structure of CYM theory, free and with matter fields, and investigate the different types of actions we have found in this work. In [29], the authors studied the algebraic structure of Carrollian supersymmetric theory, and in the near future we want to construct the Carrollian version of $\mathcal{N} = 4$ super Yang-Mills theory and understand its role in flat space holography. The different actions we have found in this paper would be a starting point for the supersymmetrization of CYM theory.

Acknowledgments

We would like to first thank Arjun Bagchi for fruitful discussions, necessary suggestions, and valuable comments on the manuscript. We would also like to thank Nilay Kundu, Kedar Kolekar, and Sudipta Dutta for fruitful discussions.

A Rotation and Boost invariance

Electric ($\delta = 0$)

Rotation: Under rotations the fields transform as
\[
\delta_{M_{ij}} a^{a(0)}_t = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_t, \tag{A.1}
\]
\[
\delta_{M_{ij}} a^{a(0)}_k = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_k + \big(\delta_{ik}\, a^{a(0)}_j - \delta_{jk}\, a^{a(0)}_i\big). \tag{A.2}
\]
The action (4.17) changes under these transformations as
\[
\delta_{M_{ij}} \mathcal{L}^{(0)} = \partial_j\big(x_i\, E^{a(0)}_k E^{a(0)}_k\big) - \partial_i\big(x_j\, E^{a(0)}_k E^{a(0)}_k\big). \tag{A.3}
\]
The action is rotation invariant.

Boost: The fields transform as
\[
\delta_{B_i} a^{a(0)}_t = x_i\,\partial_t a^{a(0)}_t + q_1\, a^{a(0)}_i, \tag{A.4}
\]
\[
\delta_{B_i} a^{a(0)}_j = x_i\,\partial_t a^{a(0)}_j + q_2\,\delta_{ij}\, a^{a(0)}_t. \tag{A.5}
\]
The action (4.17) changes under these transformations as
\[
\delta_{B_k} \mathcal{L}^{(0)} = \partial_t\Big(\frac{x_k}{2}\, E^{a(0)}_i E^{a(0)}_i\Big). \tag{A.6}
\]
So the action is invariant under boosts if the constants $q_1$ and $q_2$ are $0$ and $1$, respectively.
Magnetic ($\delta = 0$)

Rotation: The fields transform as
\[
\delta_{M_{ij}} \xi^a_k = (x_i\partial_j - x_j\partial_i)\,\xi^a_k + \delta_{ik}\,\xi^a_j - \delta_{jk}\,\xi^a_i, \tag{A.7}
\]
\[
\delta_{M_{ij}} a^{a(0)}_t = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_t, \tag{A.8}
\]
\[
\delta_{M_{ij}} a^{a(0)}_k = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_k + \big(\delta_{ik}\, a^{a(0)}_j - \delta_{jk}\, a^{a(0)}_i\big). \tag{A.9}
\]
The action (4.58) changes as
\[
\delta_{M_{ij}} \mathcal{L}_{\rm NLO} = \partial_i\big(x_j\,\mathcal{L}\big) - \partial_j\big(x_i\,\mathcal{L}\big). \tag{A.10}
\]
The action is rotation invariant.

Boost: The fields transform as
\[
\delta_{B_k} a^a_t = x_k\,\partial_t a^a_t + q_1\, a^a_k, \tag{A.11}
\]
\[
\delta_{B_k} a^a_i = x_k\,\partial_t a^a_i + q_2\,\delta_{ik}\, a^a_t, \tag{A.12}
\]
\[
\delta_{B_k} \xi^a_i = x_k\,\partial_t \xi^a_i + q_3\, f^a_{ik}. \tag{A.13}
\]
The action (4.58) changes as
\[
\delta_{B_k} \mathcal{L} = \partial_t\big(x_k\,\mathcal{L}\big). \tag{A.14}
\]
So the action is invariant under boosts if the constants $q_1$, $q_2$ and $q_3$ are $0$, $-1$ and $1$, respectively.

Electric ($\delta = 1$)

Rotation: The fields transform as
\[
\delta_{M_{ij}} a^{a(0)}_t = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_t, \tag{A.15}
\]
\[
\delta_{M_{ij}} a^{a(0)}_k = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_k + \big(\delta_{ik}\, a^{a(0)}_j - \delta_{jk}\, a^{a(0)}_i\big). \tag{A.16}
\]
The action (4.1) changes as
\[
\delta_{M_{ij}} \tilde{\mathcal{L}}^{(0)} = \partial_j\Big(\frac{1}{2} x_i\, \tilde{E}^{a(0)}_k \tilde{E}^{a(0)}_k\Big) - \partial_i\Big(\frac{1}{2} x_j\, \tilde{E}^{a(0)}_k \tilde{E}^{a(0)}_k\Big). \tag{A.17}
\]
So the action is invariant under rotations.
Boost: The fields transform as
\[
\delta_{B_i} a^{a(0)}_t = x_i\,\partial_t a^{a(0)}_t, \tag{A.18}
\]
\[
\delta_{B_i} a^{a(0)}_j = x_i\,\partial_t a^{a(0)}_j + \delta_{ij}\, a^{a(0)}_t. \tag{A.19}
\]
The action (4.1) changes as
\[
\delta_{B_i} \tilde{\mathcal{L}}^{(0)} = \partial_t\Big(\frac{1}{2} x_i\, \tilde{E}^{a(0)}_k \tilde{E}^{a(0)}_k\Big). \tag{A.20}
\]
So the action is invariant under boosts.

Magnetic ($\delta = 1$)

Rotation: The fields transform as
\[
\delta_{M_{ij}} \xi^a_k = (x_i\partial_j - x_j\partial_i)\,\xi^a_k + \delta_{ik}\,\xi^a_j - \delta_{jk}\,\xi^a_i, \tag{A.21}
\]
\[
\delta_{M_{ij}} a^{a(0)}_t = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_t, \tag{A.22}
\]
\[
\delta_{M_{ij}} a^{a(0)}_k = (x_i\partial_j - x_j\partial_i)\, a^{a(0)}_k + \big(\delta_{ik}\, a^{a(0)}_j - \delta_{jk}\, a^{a(0)}_i\big). \tag{A.23}
\]
The action (4.40) changes as
\[
\delta_{M_{ij}} \mathcal{L}_{\rm NLO} = \partial_i\big(x_j\,\mathcal{L}\big) - \partial_j\big(x_i\,\mathcal{L}\big). \tag{A.24}
\]
So the action is invariant under rotations.

Boost: The fields transform as
\[
\delta_{B_k} a^a_t = x_k\,\partial_t a^a_t, \tag{A.25}
\]
\[
\delta_{B_k} a^a_i = x_k\,\partial_t a^a_i - \delta_{ik}\, a^a_t, \tag{A.26}
\]
\[
\delta_{B_k} \xi^a_i = x_k\,\partial_t \xi^a_i + \tilde{f}^a_{ik}. \tag{A.27}
\]
The action (4.40) changes as
\[
\delta_{B_k} \mathcal{L} = \partial_t\big(x_k\,\mathcal{L}\big). \tag{A.28}
\]
So the action is invariant under boosts.
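These total-derivative statements are straightforward to verify symbolically. As an illustration, the following sympy sketch (ours) checks the boost variation (A.18)-(A.20) for the $\delta = 1$ electric sector, taking $\tilde{\mathcal{L}}^{(0)} = \frac{1}{2}\tilde{E}_i\tilde{E}_i$ with $\tilde{E}_i = \partial_t a_i - \partial_i a_t$ (the kinetic term of (6.1) without gauge fixing); the color index is a spectator and is suppressed.

```python
import sympy as sp

t, x1, x2, x3 = sp.symbols('t x1 x2 x3')
X = (x1, x2, x3)
a_t = sp.Function('a_t')(t, *X)
a = [sp.Function(f'a_{j+1}')(t, *X) for j in range(3)]

E = [sp.diff(a[j], t) - sp.diff(a_t, X[j]) for j in range(3)]      # \tilde E_j
L = sp.Rational(1, 2) * sum(Ej**2 for Ej in E)

for i in range(3):                                   # boost direction x_{i+1}
    # boost variations (A.18)-(A.19)
    d_at = X[i] * sp.diff(a_t, t)
    d_a = [X[i] * sp.diff(a[j], t) + (a_t if j == i else 0) for j in range(3)]
    # first-order variation of L versus the claimed total time derivative (A.20)
    dL = sum(E[j] * (sp.diff(d_a[j], t) - sp.diff(d_at, X[j])) for j in range(3))
    claim = sp.diff(sp.Rational(1, 2) * X[i] * sum(Ej**2 for Ej in E), t)
    print(sp.simplify(sp.expand(dL - claim)))        # 0 for each boost direction
```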
B Charge Algebra

In Sec. 5 we gave the Noether charges and a discussion of the charge algebra. In this appendix, we present the charges in the pre-symplectic language, with some examples.

Electric Sector

Using the expression of $\Theta$, we can define the Poisson bracket for the electric sector as
\[
\Omega(\delta_1,\delta_2) = \int d^3x\,\big[\delta_1\Theta(\delta_2) - \delta_2\Theta(\delta_1)\big]
= \int d^3x\,\big[\delta_1 a^{a(0)}_i\,\delta_2 E^{a(0)}_i - \delta_2 a^{a(0)}_i\,\delta_1 E^{a(0)}_i\big]. \tag{B.1}
\]
Let us look at some commutators of the conformal Carrollian algebra.

$[P,P]$:
\[
\delta_{P_i} a^{a(0)}_j = \partial_i a^{a(0)}_j, \qquad \delta_{P_i} a^{a(0)}_t = \partial_i a^{a(0)}_t. \tag{B.2}
\]
Using these expressions in Eq. (B.1) we get
\[
\Omega(\delta_{P_l},\delta_{P_k}) = \int d^3x\,\big[\partial_l a^{a(0)}_i\,\partial_k E^{a(0)}_i - \partial_k a^{a(0)}_i\,\partial_l E^{a(0)}_i\big]
= \int d^3x\,\big[\partial_l\big(a^{a(0)}_i\,\partial_k E^{a(0)}_i\big) - \partial_k\big(a^{a(0)}_i\,\partial_l E^{a(0)}_i\big)\big] = 0. \tag{B.3}
\]
The last equality follows because the integrand in the previous step is a total derivative.

$[P,M]$:
\[
\delta_{M_f} a^{a(0)}_t = f(x)\,\partial_t a^{a(0)}_t, \qquad \delta_{M_f} a^{a(0)}_i = f(x)\,\partial_t a^{a(0)}_i + a^{a(0)}_t\,\partial_i f(x), \tag{B.4}
\]
\[
\Omega(\delta_{P_l},\delta_{M_{f(x)}}) = \int d^3x\,\Big[\partial_l a^{a(0)}_i\, f(x)\,\partial_t E^{a(0)}_i - \big(f(x)\,\partial_t a^{a(0)}_i + a^{a(0)}_t\,\partial_l f(x)\big)\,\partial_l E^{a(0)}_i\Big]
\]
\[
= \int d^3x\,\Big[\partial_l f(x)\, E^{a(0)}_i E^{a(0)}_i + f(x)\,\partial_l a^{a(0)}_i\,\partial_t E^{a(0)}_i + f(x)\, a^{a(0)}_t\,\partial_t\partial_i E^{a(0)}_i\Big]
= Q_{\rm electric}(M_h), \quad \text{where } h = \partial_l f(x). \tag{B.5}
\]

$[D,M]$:
\[
\Omega(\delta_D,\delta_{M_{f(x)}}) = Q_{\rm electric}(M_{h_1}), \quad \text{where } h_1 = -f(x) + x_k\partial_k f(x). \tag{B.6}
\]

$[K_i,M_f]$:
\[
\Omega(\delta_{K_i},\delta_{M_f}) = Q_{\rm electric}(M_{h_2}), \quad \text{where } h_2 = \big(2 x_i x_k\partial_k - x_k x_k\partial_i - 2 x_i\big)\, f(x). \tag{B.7}
\]
The expression of $Q_{\rm electric}$ is given in Sec. 5.

Magnetic Sector

Using the expression of $\Theta$, we can define the Poisson bracket for the magnetic sector as
\[
\Omega(\delta_1,\delta_2) = \int d^3x\,\big[\delta_1\Theta(\delta_2) - \delta_2\Theta(\delta_1)\big]
= \int d^3x\,\big[\delta_1 a^a_i\,\delta_2 \xi^a_i - \delta_2 a^a_i\,\delta_1 \xi^a_i\big]. \tag{B.8}
\]
Similarly to the electric case, we find the following.
$[P,P]$: Using these expressions in Eq. (B.1) we get
\[
\Omega(\delta_{P_l},\delta_{P_k}) = 0. \tag{B.9}
\]
Again, this vanishes because the integrand is a total derivative.

$[P,M]$: Using the transformations given in the previous section,
\[
\Omega(\delta_{P_l},\delta_{M_{f(x)}}) = Q_{\rm Magnetic}(M_h), \quad \text{where } h = \partial_l f(x). \tag{B.10}
\]

$[D,M]$:
\[
\Omega(\delta_D,\delta_{M_{f(x)}}) = Q_{\rm Magnetic}(M_{h_1}), \quad \text{where } h_1 = -f(x) + x_k\partial_k f(x). \tag{B.11}
\]

$[K_i,M_f]$:
\[
\Omega(\delta_{K_i},\delta_{M_f}) = Q_{\rm Magnetic}(M_{h_2}), \quad \text{where } h_2 = \big(2 x_i x_k\partial_k - x_k x_k\partial_i - 2 x_i\big)\, f(x). \tag{B.12}
\]
The expression of $Q_{\rm magnetic}$ is given in Sec. 5.

C Discussion on previous work on Carrollian Yang-Mills theory

In [15], the authors discussed Carrollian Yang-Mills theory at the level of the equations of motion. In their analysis for the SU(2) theory, there are four different sectors of Carrollian Yang-Mills equations of motion. For a detailed discussion readers are encouraged to see the reference mentioned above. Here we will do a similar analysis and see how we can relate our results to the previous analysis. The relativistic equations of motion are
\[
\partial_\mu F^a_{\mu\nu} + g f^{abc} A^b_\mu F^c_{\mu\nu} = D_\mu F^a_{\mu\nu} = 0, \tag{C.1}
\]
whose temporal and spatial parts can be written as
\[
\partial_i F^a_{i0} + g f^{abc} A^b_i F^c_{i0} = D_i F^a_{i0} = 0, \tag{C.2}
\]
\[
\partial_i F^a_{ij} + g f^{abc} A^b_i F^c_{ij} = D_i F^a_{ij} = 0. \tag{C.3}
\]
To derive the Carrollian Yang-Mills equations of motion using the formalism discussed in [15], we have to scale $t$, $x$ and all the fields of the theory, along with the coupling $g$, as
\[
x_i \to \epsilon^{\beta} x_i, \quad x_0 \to \epsilon^{\beta+1} t, \quad A^a_i \to \epsilon^{\alpha+1} a^a_i, \quad A^a_0 \to \epsilon^{\alpha} a^a_t, \quad g \to \epsilon^{\gamma} g, \quad \text{with } \epsilon \to 0. \tag{C.4}
\]
In this limit the consistent equations of motion are
\[
\partial_i E^a_i+gf^{abc}a^b_i E^c_i=D_i E^a_i=0\tag{C.5}
\]
\[
\partial_i f^a_{ij}+gf^{abc}a^b_i f^c_{ij}=D_i f^a_{ij}=0\tag{C.6}
\]
provided $\gamma=-(\alpha+\beta+1)$, where $E^a_i=\partial_t a^a_i-\partial_i a^a_t+gf^{abc}a^b_t a^c_i$ and $f^a_{ij}=\partial_i a^a_j-\partial_j a^a_i+gf^{abc}a^b_i a^c_j$. These equations of motion are the same as Eq. (4.18). The equations of motion obtained here by scaling the fields are reproduced from the electric-sector action discussed in Sec. 4.2. In Sec. 4.1 we have another electric sector of Carrollian Yang-Mills theory, which consists of copies of the electric sector of the Carrollian abelian theory; the equations of motion of that electric sector were computed previously in [15]. The magnetic-sector equations of motion of this paper do not match the previous work in [15], because our results are derived from the relativistic theory with a Lagrange multiplier.

References

[1] J. M. Maldacena, The Large N limit of superconformal field theories and supergravity, Adv. Theor. Math. Phys. 2 (1998) 231 [hep-th/9711200].
[2] C. Duval, G. Burdet, H. P. Kunzle and M. Perrin, Bargmann Structures and Newton-Cartan Theory, Phys. Rev. D 31 (1985) 1841.
[3] C. Duval and P. A. Horvathy, Non-relativistic conformal symmetries and Newton-Cartan structures, J. Phys. A 42 (2009) 465206 [0904.0531].
[4] D. Van den Bleeken and C. Yunus, Newton-Cartan, Galileo-Maxwell and Kaluza-Klein, Class. Quant. Grav. 33 (2016) 137002 [1512.03799].
[5] E. Bergshoeff, J. Rosseel and T. Zojer, Non-relativistic fields from arbitrary contracting backgrounds, Class. Quant. Grav. 33 (2016) 175010 [1512.06064].
[6] D. Hansen, J. Hartong and N. A. Obers, Non-Relativistic Gravity and its Coupling to Matter, JHEP 06 (2020) 145 [2001.10277].
[7] D. Hansen, J. Hartong and N. A. Obers, Non-relativistic expansion of the Einstein-Hilbert Lagrangian, in 15th Marcel Grossmann Meeting on Recent Developments in Theoretical and Experimental General Relativity, Astrophysics, and Relativistic Field Theories, 5, 2019, 1905.13723.
[8] D. Hansen, J. Hartong, N. A. Obers and G. Oling, Galilean first-order formulation for the nonrelativistic expansion of general relativity, Phys. Rev. D 104 (2021) L061501 [2012.01518].
[9] M. Ergen, E. Hamamci and D. Van den Bleeken, Oddity in nonrelativistic, strong gravity, Eur. Phys. J. C 80 (2020) 563 [2002.02688].
[10] L. Susskind, Holography in the flat space limit, AIP Conf. Proc. 493 (1999) 98 [hep-th/9901079].
[11] A. Bagchi and R. Fareghbal, BMS/GCA Redux: Towards Flatspace Holography from Non-Relativistic Symmetries, JHEP 10 (2012) 092 [1203.5795].
[12] A. Bagchi, Correspondence between Asymptotically Flat Spacetimes and Nonrelativistic Conformal Field Theories, Phys. Rev. Lett. 105 (2010) 171601 [1006.3354].
[13] A. Bagchi, R. Basu, D. Grumiller and M. Riegler, Entanglement entropy in Galilean conformal field theories and flat holography, Phys. Rev. Lett. 114 (2015) 111602 [1410.4089].
[14] A. Bagchi and R. Basu, 3D Flat Holography: Entropy and Logarithmic Corrections, JHEP 03 (2014) 020 [1312.5748].
[15] A. Bagchi, R. Basu, A. Kakkar and A. Mehra, Flat Holography: Aspects of the dual field theory, JHEP 12 (2016) 147 [1609.06203].
[16] G. Barnich and G. Compere, Classical central extension for asymptotic symmetries at null infinity in three spacetime dimensions, Class. Quant. Grav. 24 (2007) F15 [gr-qc/0610130].
[17] G. Barnich and C. Troessaert, Aspects of the BMS/CFT correspondence, JHEP 05 (2010) 062 [1001.1541].
[18] G. Barnich, A. Gomberoff and H. A. Gonzalez, The Flat limit of three dimensional asymptotically anti-de Sitter spacetimes, Phys. Rev. D 86 (2012) 024020 [1204.3288].
[19] G. Barnich, Entropy of three-dimensional asymptotically flat cosmological solutions, JHEP 10 (2012) 095 [1208.4371].
[20] L. Ciambelli, C. Marteau, A. C. Petkou, P. M. Petropoulos and K. Siampos, Flat holography and Carrollian fluids, JHEP 07 (2018) 165 [1802.06809].
[21] A. Bagchi, A. Banerjee and H. Muraki, Boosting to BMS, 2205.05094.
[22] S. Pasterski, M. Pate and A.-M. Raclariu, Celestial Holography, in 2022 Snowmass Summer Study, 11, 2021, 2111.11392.
[23] A.-M. Raclariu, Lectures on Celestial Holography, 2107.02075.
[24] S. Pasterski, Lectures on celestial amplitudes, Eur. Phys. J. C 81 (2021) 1062 [2108.04801].
[25] C. Dappiaggi, BMS field theory and holography in asymptotically flat space-times, JHEP 11 (2004) 011 [hep-th/0410026].
[26] C. Dappiaggi, V. Moretti and N. Pinamonti, Rigorous steps towards holography in asymptotically flat spacetimes, Rev. Math. Phys. 18 (2006) 349 [gr-qc/0506069].
[27] A. Bagchi, A. Mehra and P. Nandi, Field Theories with Conformal Carrollian Symmetry, JHEP 05 (2019) 108 [1901.10147].
[28] A. Bagchi, R. Basu, A. Mehra and P. Nandi, Field Theories on Null Manifolds, JHEP 02 (2020) 141 [1912.09388].
[29] A. Bagchi, D. Grumiller and P. Nandi, Carrollian superconformal theories and super BMS, 2202.01172.
[30] C. Duval, G. W. Gibbons and P. A. Horvathy, Conformal Carroll groups and BMS symmetry, Class. Quant. Grav. 31 (2014) 092001 [1402.5894].
[31] C. Duval, G. W. Gibbons and P. A. Horvathy, Conformal Carroll groups, J. Phys. A 47 (2014) 335204 [1403.4213].
[32] A. Bagchi, S. Banerjee, R. Basu and S. Dutta, Scattering Amplitudes: Celestial and Carrollian, 2202.08438.
[33] L. Donnay, A. Fiorucci, Y. Herfray and R. Ruzziconi, A Carrollian Perspective on Celestial Holography, 2202.04702.
[34] L. Donnay and C. Marteau, Carrollian Physics at the Black Hole Horizon, Class. Quant. Grav. 36 (2019) 165002 [1903.09654].
[35] G. Dautcourt, On the ultrarelativistic limit of general relativity, Acta Phys. Polon. B 29 (1998) 1047 [gr-qc/9801093].
[36] J. Hartong, Gauging the Carroll Algebra and Ultra-Relativistic Gravity, JHEP 08 (2015) 069 [1505.05011].
[37] E. Bergshoeff, J. Gomis, B. Rollier, J. Rosseel and T. ter Veldhuis, Carroll versus Galilei Gravity, JHEP 03 (2017) 165 [1701.06156].
[38] C. Duval, G. W. Gibbons, P. A. Horvathy and P. M. Zhang, Carroll versus Newton and Galilei: two dual non-Einsteinian concepts of time, Class. Quant. Grav. 31 (2014) 085016 [1402.0657].
[39] L. Ciambelli and C. Marteau, Carrollian conservation laws and Ricci-flat gravity, Class. Quant. Grav. 36 (2019) 085004 [1810.11037].
[40] K. Morand, Embedding Galilean and Carrollian geometries I. Gravitational waves, J. Math. Phys. 61 (2020) 082502 [1811.12681].
[41] L. Ciambelli, R. G. Leigh, C. Marteau and P. M. Petropoulos, Carroll Structures, Null Geometry and Conformal Isometries, Phys. Rev. D 100 (2019) 046010 [1905.02221].
[42] J. de Boer, J. Hartong, N. A. Obers, W. Sybesma and S. Vandoren, Carroll symmetry, dark energy and inflation, 2110.02319.
[43] L. Ciambelli, C. Marteau, A. C. Petkou, P. M. Petropoulos and K. Siampos, Covariant Galilean versus Carrollian hydrodynamics from relativistic fluids, Class. Quant. Grav. 35 (2018) 165001 [1802.05286].
[44] A. C. Petkou, P. M. Petropoulos, D. R. Betancour and K. Siampos, Relativistic Fluids, Hydrodynamic Frames and their Galilean versus Carrollian Avatars, 2205.09142.
[45] L. Freidel and P. Jai-akson, Carrollian hydrodynamics from symmetries, 2209.03328.
[46] L. Freidel and P. Jai-akson, Carrollian hydrodynamics and symplectic structure on stretched horizons, 2211.06415.
[47] J. Redondo-Yuste and L. Lehner, Non-linear black hole dynamics and Carrollian fluids, 2212.06175.
[48] R. M. Nandkishore and M. Hermele, Fractons, Ann. Rev. Condensed Matter Phys. 10 (2019) 295 [1803.11196].
[49] L. Bidussi, J. Hartong, E. Have, J. Musaeus and S. Prohazka, Fractons, dipole symmetries and curved spacetime, 2111.03668.
[50] A. Pérez and S. Prohazka, Asymptotic symmetries and soft charges of fractons, 2203.02817.
[51] A. Bagchi, A. Banerjee, R. Basu, M. Islam and S. Mondal, Magic Fermions: Carroll and Flat Bands, 2211.11640.
[52] D. Hansen, N. A. Obers, G. Oling and B. T. Søgaard, Carroll Expansion of General Relativity, 2112.12684.
[53] A. Bagchi, Tensionless Strings and Galilean Conformal Algebra, JHEP 05 (2013) 141 [1303.0291].
[54] A. Bagchi, S. Chakrabortty and P. Parekh, Tensionless Strings from Worldsheet Symmetries, JHEP 01 (2016) 158 [1507.04361].
[55] A. Bagchi, A. Banerjee, S. Chakrabortty, S. Dutta and P. Parekh, A tale of three – tensionless strings and vacuum structure, JHEP 04 (2020) 061 [2001.00354].
[56] A. Bagchi, M. Mandlik and P. Sharma, Tensionless tales: vacua and critical dimensions, JHEP 08 (2021) 054 [2105.09682].
[57] A. Bagchi, A. Banerjee, S. Chakrabortty and R. Chatterjee, A Rindler Road to Carrollian Worldsheets, 2111.01172.
[58] A. Banerjee, S. Dutta and S. Mondal, Carroll fermions in two dimensions, 2211.11639.
[59] Z.-f. Yu and B. Chen, Free field realization of the BMS Ising model, 2211.06926.
[60] P.-X. Hao, W. Song, Z. Xiao and X. Xie, A BMS-invariant free fermion model, 2211.06927.
[61] M. L. Bellac and J.-M. Levy-Leblond, Galilean Electromagnetism, Nuovo Cimento 14B (1973).
[62] A. Bagchi, R. Basu and A. Mehra, Galilean Conformal Electrodynamics, JHEP 11 (2014) 061 [1408.0810].
[63] A. Bagchi, R. Basu, A. Kakkar and A. Mehra, Galilean Yang-Mills Theory, JHEP 04 (2016) 051 [1512.08375].
[64] A. Bagchi, J. Chakrabortty and A. Mehra, Galilean Field Theories and Conformal Structure, JHEP 04 (2018) 144 [1712.05631].
[65] G. Festuccia, D. Hansen, J. Hartong and N. A. Obers, Symmetries and Couplings of Non-Relativistic Electrodynamics, JHEP 11 (2016) 037 [1607.01753].
[66] K.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Banerjee, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Basu and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Mohan, Uniqueness of Galilean Conformal Electrodynamics and its Dynamical Structure, JHEP 11 (2019) 041 [1909.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='11993].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [67] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Bagchi, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Basu, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Islam, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Kolekar and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Mehra, Galilean Gauge Theories from Null Reductions, 2201.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='12629.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [68] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Chapman, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Di Pietro, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Grosvenor and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Yan, Renormalization of Galilean Electrodynamics, JHEP 10 (2020) 195 [2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='03033].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [69] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Banerjee and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Sharma, Quantization of Interacting Galilean Field theories, 2205.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='01918.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [70] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Leblond, Une nouvelle limite non-relativiste du group de Poincar´e, Annales Poincare Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='Theor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' 3 (1965) 1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [71] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Sen Gupta, On an Analogue of the Galileo Group, Nuovo Cim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' 54 (1966) 512 • DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='1007/BF02740871 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [72] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Basu and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Chowdhury, Dynamical structure of Carrollian Electrodynamics, JHEP 04 (2018) 111 [1802.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='09366].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [73] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Banerjee, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Basu, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Mehra, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Mohan and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Sharma, Interacting Conformal Carrollian Theories: Cues from Electrodynamics, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' D 103 (2021) 105001 [2008.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='02829].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [74] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Henneaux and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Salgado-Rebolledo, Carroll contractions of Lorentz-invariant theories, JHEP 11 (2021) 180 [2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='06708].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [75] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Levy-Leblond, Une nouvelle limite non-relativiste du group de Poincare, Ann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Inst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Poincare 3 (1965) 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' [76] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Baiguera, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Oling, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Sybesma and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' Søgaard, Conformal Carroll Scalars with Boosts, 2207.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content='03468.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} +page_content=' – 37 –' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/vtAzT4oBgHgl3EQfB_rN/content/2301.00953v1.pdf'} diff --git a/wdE3T4oBgHgl3EQflQpC/content/2301.04604v1.pdf b/wdE3T4oBgHgl3EQflQpC/content/2301.04604v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e98dd92e82c1f76fba2c3f299dcc6e1e30321dae --- /dev/null +++ b/wdE3T4oBgHgl3EQflQpC/content/2301.04604v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71d8ff40d3969cd54ba0dfad8b3346023a1ba4c2f2e75a35723c0ef996c2aca4 +size 8427615 diff --git a/wdE3T4oBgHgl3EQflQpC/vector_store/index.faiss b/wdE3T4oBgHgl3EQflQpC/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..15c6065cb8571178ac014e30c19929d56cf1c017 --- /dev/null +++ b/wdE3T4oBgHgl3EQflQpC/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7ee85564d78125dd736403397345726d4eb52567274936d34f3cab8fbd88054 +size 3276845 diff --git a/wdE3T4oBgHgl3EQflQpC/vector_store/index.pkl b/wdE3T4oBgHgl3EQflQpC/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f453594e0c21d674486e3206691ade59867cf2f7 --- /dev/null +++ b/wdE3T4oBgHgl3EQflQpC/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce61570c477e3861ef2b914bfe01f0ea287f3ef965e26d5030e13d8a92966538 +size 116496 diff --git a/wtAzT4oBgHgl3EQfCPrg/content/tmp_files/2301.00957v1.pdf.txt b/wtAzT4oBgHgl3EQfCPrg/content/tmp_files/2301.00957v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..85044925c20df2e355875e53df2dfcf3c6ae785a --- /dev/null +++ b/wtAzT4oBgHgl3EQfCPrg/content/tmp_files/2301.00957v1.pdf.txt @@ -0,0 +1,1411 @@ +APS/123-QED +Meta-learning generalizable dynamics from trajectories +Qiaofeng Li1,2,3, Tianyi Wang2, Vwani Roychowdhury2,∗, and M.K. Jawed1,∗ +1Dept. of Mechanical and Aerospace Engineering, +University of California, Los Angeles, CA 90095, USA +2Dept. of Electrical and Computer Engineering, +University of California, Los Angeles, CA 90095, USA +3Dept. +of Mechanical Engineering, Massachusetts Institute of Technology, Cambridge, MA 02139, USA +1 +arXiv:2301.00957v1 [cs.LG] 3 Jan 2023 + +Abstract +We present the interpretable meta neural ordinary differential equation (iMODE) method to +rapidly learn generalizable (i.e. +not parameter-specific) dynamics from trajectories of multiple +dynamical systems that vary in their physical parameters. +The iMODE method learns meta- +knowledge, the functional variations of the force field of dynamical system instances without know- +ing the physical parameters, by adopting a bi-level optimization framework: an outer level cap- +turing the common force field form among studied dynamical system instances and an inner level +adapting to individual system instances. A priori physical knowledge can be conveniently em- +bedded in the neural network architecture as inductive bias, such as conservative force field and +Euclidean symmetry. With the learned meta-knowledge, iMODE can model an unseen system +within seconds, and inversely reveal knowledge on the physical parameters of a system, or as a +Neural Gauge to “measure” the physical parameters of an unseen system with observed trajec- +tories. 
We test the validity of the iMODE method on bistable, double pendulum, Van der Pol, Slinky, and reaction-diffusion systems.

Building predictive models of dynamical systems is a central challenge across diverse disciplines of science and engineering. Traditionally, this has been achieved by first manually deriving the governing equations with carefully chosen state variables and then fitting the undetermined physical parameters to observed data, e.g., [1-3]. To avoid the painstaking formulation of analytical equations, researchers have recently leveraged advances in machine learning and the data-fitting power of neural networks (NNs) to make the modeling process both automatic and more expressive [4]. This is achieved either by adopting the conventional physics-based approach as a starting point and replacing various components with data-driven modules [5, 6], or by directly learning discrete dynamics with autoregressive models from high-dimensional observations [7-9]. These works, while promising, need to fit dedicated models separately for system instances with different parameters, which limits a model's applicability to one specific instance.

In this letter, our goal is to learn meta-knowledge on dynamical systems, i.e., the form of the dynamics that is not restricted to specific physical parameters or initial/boundary conditions, both to reveal physical insights [10-12] and to significantly improve the generalization ability of data-driven models. Specifically, we learn the shared dynamics form from trajectories generated by a series of dynamical system instances, despite their diverse behaviors in the data and without knowing the system parameters. This separates our work from Refs. [13, 14] and from Neural Operators [15-18], in which the true parameters must be provided. The goal aligns with that of multi-task meta-learning [19], which aims to leverage the similarities between tasks to enable better generalization and efficient adaptation to unseen tasks.

We propose an efficient and interpretable method to model a family of dynamical systems from their observed trajectories by combining gradient-based meta-learning (GBML) [20-24] with neural ordinary differential equations (NODE) [1, 6, 25]. Recognizing that the systems share a dynamics form while their physical parameters vary, we separate the model parameters into two parts: the shared parameters that capture the common form of the dynamics, i.e., the meta-knowledge, and the adaptation parameters that account for variations across system instances. The method generalizes well to unseen systems from the same family, and the adaptation parameters show good interpretability. The intrinsic dimension of the varying system parameters can be estimated by analyzing the adaptation parameters. Given ground-truth system parameters, a simple correspondence can be established between the adaptation parameters and the actual physical parameters through a diffeomorphism, which can be utilized as a "Neural Gauge" to measure properties of new systems from observed trajectories. We name our method interpretable meta neural ODE (iMODE).

In a general autonomous second-order system, the state of the system y contains the position (generalized coordinates) x and the velocity ẋ.
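As a concrete illustration of this state representation (a minimal sketch, not part of the original letter), one system instance can be written as a first-order system in y and integrated numerically to produce an observed trajectory. The pendulum right-hand side, the 10 ms step, and the initial condition below mirror the settings reported in the Supplemental Material, while the function and variable names are placeholders.

```python
# Minimal sketch: pack the second-order state y = (x, x_dot) and integrate one
# trajectory of a single pendulum instance (arm length = physical parameter phi).
import numpy as np
from scipy.integrate import solve_ivp

def pendulum_rhs(t, y, length, g=9.81):
    """y = [theta, theta_dot]; returns dy/dt = [theta_dot, theta_ddot]."""
    theta, theta_dot = y
    return [theta_dot, -(g / length) * np.sin(theta)]

length = 1.0                                  # one instance of the parametric family
y0 = [np.pi / 2, 0.0]                         # initial position and velocity
t_eval = np.arange(0.0, 1.0, 0.01)            # T time steps, 10 ms apart

sol = solve_ivp(pendulum_rhs, (t_eval[0], t_eval[-1]), y0,
                t_eval=t_eval, args=(length,), rtol=1e-8)
trajectory = sol.y.T                          # observations y(t_k), shape (T, 2)
```

Repeating this over several arm lengths and initial conditions yields the kind of multi-instance trajectory dataset used below.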
The dynamics of the second-order system is expressed by

ẏ = [ẋ, ẍ]ᵀ = [ẋ, M⁻¹F_φ(y)]ᵀ, where y = [x, ẋ]ᵀ    (1)

where F_φ is the force vector containing all the internal and external forces, and M is the mass matrix. With a set of physical parameters φ, the force function F(·) dictates the dynamics of the system, which determines a unique trajectory y(t) given an initial condition y(t₀). In the remainder of the letter, without loss of generality, the mass is normalized to an identity matrix, i.e., M = I.

Trajectories are collected from multiple system instances into a dataset D. Consider N_s instances that share the dynamics form F_φ(·) but have distinct physical parameters {φ₁, ..., φ_{N_s}}. From each system instance, N_tr trajectories are observed, each containing observations across T time steps. In summary, D = { {y_{i,j}(t_k)}_{k=0}^{T} | i = 1, ..., N_s, j = 1, ..., N_tr }.

Figure 1. (color online). (a) In iMODE, a neural module F_θ parameterized by θ takes the concatenation of the system state y and the adaptation parameters η and generates the estimated force as output. (b) The bi-level iteration process in the iMODE method. The NN weights θ are shared across system instances while η is adapted for each instance. The meta gradient w.r.t. θ aggregates the gradients evaluated with instance-adapted η. (c) Examples of the estimated force field f_θ(·; η) for Van der Pol system instances that differ in their ϵ parameter (in ascending order from top to bottom). The estimation quality is further evaluated through the trajectories generated by the fields, as shown in (d). (d) The estimated force field can be used to predict system trajectories for unseen initial conditions through integration (Eq. (2)). The signature limit cycles of Van der Pol systems are faithfully reproduced.

The data-driven model is trained on D, knowing which trajectories are from the same system instance (i.e., given both indices i and j of each trajectory), but without being given the knowledge of {φ_i}_{i=1}^{N_s}. Take the pendulum system as an example. An instance is a pendulum with a specific arm length (since the inertia is normalized); therefore φ includes only the arm length. A trajectory contains the location and speed of the pendulum during a time period.

In our framework, a neural network f_θ(y; η) (Fig. 1(a); see Supplemental Material (SM) [27] for a detailed description) replaces F_φ(y) in Eq. (1) to approximate the observed trajectories, where η is adapted to each system instance such that, with a certain η_i, f_θ(y; η_i) approximates the force function of the ith system instance F_{φ_i}(y). After training, η becomes a proxy for the physical parameters φ, and θ denotes the model parameters that capture the functional form of dynamics shared across system instances. The predicted trajectory starting from an initial condition y₀ is given by integration (the 5th-order Dormand-Prince-Shampine solver is used throughout this letter to compute integrals)

ŷ(t, y₀, θ, η) = y₀ + ∫_{t₀}^{t} f_θ(ŷ(τ); η) dτ    (2)

For brevity, we denote the trajectory y_{i,j}(t) as y_{i,j}, the corresponding prediction ŷ(t, y_{i,j}(t₀), θ, η) as ŷ_{i,j}(θ, η), and use ∥ y_{i,j} − ŷ_{i,j}(θ, η) ∥² to denote Σ_{k=0}^{T} ( y_{i,j}(t_k) − ŷ(t_k, y_{i,j}(t₀), θ, η) )², the squared difference between y_{i,j} and ŷ_{i,j}(θ, η) across all time steps. The goal of the modeling is formulated as a bi-level optimization (Fig. 1(b)),

outer:  min_θ L̄(θ) = (1/N_s) Σ_{i=1}^{N_s} L_i(θ, η_i^{(m)}), where    (3)

L_i(θ, ζ) = (1/(N_tr T)) Σ_{j=1}^{N_tr} ∥ y_{i,j} − ŷ_{i,j}(θ, ζ) ∥²,    (4)

inner:  η_i^{(l+1)} = η_i^{(l)} − α ∇_η L_i(θ, η_i^{(l)}),  η_i^{(0)} = η    (5)

where the inner level involves an m-step gradient descent adapting η for each instance, while the outer level finds the optimal initialization for θ. Here α is the inner-level stepsize and η_i^{(m)} is the adaptation parameter vector for the ith system instance after m steps of adaptation; for short, we denote this ith adaptation result as η_i. Note that η_i depends on both θ and η, as shown in Eq. (5). To avoid higher-order derivatives, we simplify this dependency following first-order Model-Agnostic Meta-Learning (first-order MAML) [20] and use the outer-level step

θ ← θ − β Σ_{i}^{N_s} ∇_θ L_i(θ, η_i),  (assuming that ∂η_i/∂θ = 0)    (6)

where β is the outer-level stepsize. At both the inner and the outer level, the gradient calculation for functions involving integrals is enabled by NODE [1, 6, 25].

As shown in Fig. 1(c), f_θ(·; η) specifies a force field that morphs as η changes. Note that m is normally quite small (e.g., 5), so given trajectories of a previously unseen system, η can be efficiently updated with a few gradient steps, adapting the NN to specify a force field that explains the behaviors of the new system, which is one order of magnitude faster compared
+First we validate the modeling capability of the iMODE algorithm on 3 cases: oscillating +pendulum, bistable oscillator, and Van der Pol system (see SM [27] for detailed description). +The oscillating pendulum has 1 physical parameter, i.e. the arm length (rotational inertia +normalized). Fig. 2(a) shows that the predicted trajectories using task-adapted NNs match +6 + +/s) +n000004 +1.2 +M211 +0.5 +M10.51.51e +0000001.5=1.0 +=1.51=0.51.81.6 +14 +1.2 +M211 +0.5 +M10.51.511.5=1.0 +=1.5=0.51.81.6 +14 +1.2 +M211 +0.5 +M10.51.511.5=1.0 +=1.5=0.5-1.5900 +880080oe-10 +m(m/s)-50 +-2m5024**米1.5¥01.81.6 +1the ground truth of each system. Fig. 2(b) shows that the learned η correlates well with the +effective stiffness of the pendulum, i.e. 1/L. Effectively η acts as a proxy of the true arm +length and can be used to infer such parameters of unseen systems. +The bistable system has a potential energy function controlled by 2 parameters k1 and k3. +Its potential energy has two local minima, or potential wells. When the initial conditions +vary, the bistable system can oscillate intra-well or inter-well. +Fig. 2(c) shows that the +task adapted trajectories (m = 5) match the ground truth well. Fig. 2(d) shows that the +identified η ∈ R2 has two principal axes, along which k1 and k3 increases. As mentioned, η is +effectively a proxy for k1 and k3. Later we will show that the mapping from η to φ = [k1, k3] +can be constructed as a diffeomorphism with NODE. +The Van der Pol system has 3 physical parameters φ = [ϵ, δ, ω]. It exhibits limit cycles +due to the negative damping for small oscillation amplitudes. +Fig. 2(e) shows that the +evolution of limit cycles due to the change of physical parameters is well predicted. Three +principal axes can be found for the identified η. The one for ω is shown in Fig. 2(f) (see SM +[27] for the other two). Again, the mapping from η to φ = [ϵ, δ, ω] can be constructed as a +diffeomorphism. +The fast adaptation of iMODE is demonstrated with the bistable systems in Fig. 3(a). +The iMODE is able to adjust the adaptation parameters in 5 steps to learn the dynamics of +unseen system instances. Training the same network from scratch (random initialization) on +the same test dataset requires much more epochs to achieve a comparable accuracy. When +evaluated on trajectories with unseen initial conditions, the performance of iMODE-adapted +models outperforms that of the model trained from scratch by several orders of magnitude, +showing superior generalization ability with limited data (see SM [27] for a more disparate +comparison when data is scarce). +Second, we demonstrate the combination of the iMODE algorithm with certain physics +priors for efficient modeling of more complicated systems. Since iMODE does not assume +specific architecture of f θ, a wide range of neural network architectures can be adopted to +embed appropriate inductive biases. For example, in bistable and the following wall bouncing +and Slinky systems, the assumption of conservative force is introduced, where the system +dynamics is determined by a potential energy function. Accordingly we take a specific form +for the neural force estimator f θ(x; η) = ∂Eθ(x; η)/∂x. That is, the NN first outputs an +energy field and then induces the force field from the energy field (using auto-differentiation +7 + +[28]). In this way, iMODE enables the fast adaptation of not only the force field but also the +potential energy field for the parametric systems. The learned potential energy functions +are shown in Fig. 
3(b). The wall bouncing system has a potential energy well that is not a +linear function of the well’s (half-)width w or the particle position x (see SM [27]). However, +iMODE is still able to approximate the discontinuous energy function. η correlates well with +the true width w, i.e., we can control the width of the potential energy well by tuning η (see +SM [27]). +The intrinsic dimension dφ of the physical parameters φ can be estimated by applying +Principal Component Analysis (PCA) to the collection of the η vectors, each adapted to one +of the system instances. Using an “elbow” method on the cumulative explained variance +ratio curve of the PCA result, the number of the principal components that explain the +most of the variance has a good correspondence with dφ, as long as dη ≥ dφ, where dη is +the dimension chosen for η. The PCA results on the pendulum, bistable system, and Van +der Pol system are shown in Fig. 3(c) (see SM [27] for the results of other systems). Taking +the Van der Pol system as an example, dη is respectively 3, 4 or 5 for the three curves with +triangle markers. In all three cases, the first three principal components explain more than +99% of the variance, and the “elbow” appears at 3, which corresponds well with the fact +that dφ = 3 for the Van der Pol system. +Neural Gauge: Without labels for the physical parameters, iMODE develops a latent +space of adaptation parameters accounting for the variations in dynamics among system +instances. +Given the physical parameter labels of the system instances in the training +data, a mapping between the space of the physical parameters and the latent space can be +established so that the corresponding physical parameters can be estimated given any point +in the latent space. iMODE therefore can be exploited as a “Neural Gauge” to identify the +physical parameters of unseen system instances, and the establishment of such mappings can +be seen as a calibration process. We propose to construct such mappings as diffeomorphism, +which can be learned with a neural ODE dz(t)/dt = gξ(z), such that starting from a given +point in the latent space, z(0) = ηi, the state z at t = 1 gives the corresponding physical +parameters, z(1) = φi, i = 1, . . . , Ns. For simplicity, the dimension of the latent space and +that of the physical parameter space are assumed to match (dη = dφ); see SM [27] for more +general treatment. gξ is a NN whose weights are optimized by ξ = arg minξ +� +i ∥zi(1)−φi∥2 +2. +Figure (3)(d) shows the learned diffeomorphism for the bistable system. The diffeomor- +8 + +phism establishes a bijection between the physical space and the latent space so that a +grid in the physical parameter space can be continuously transformed into the adaptation +parameter space (see SM [27]). The visualization highlights the advantages of diffeomor- +phism mapping: (1) The transformation is smooth so that the local geometric structure +is preserved; (2) Invertible transformation allows better interpretation of the latent space +compared to degenerating ones. +After constructing the diffeomorphism, we test the physical parameter identification per- +formance on 100 randomly selected unseen instances (with random physical parameters). +The identification error and time cost are shown in Fig. (3)(e) for pendulum, bistable, and +Van der Pol systems. The end-to-end identification starting from data-feeding normally +takes around 2 seconds. 
+Complex systems: We further demonstrate that iMODE applies to complex systems +with two examples: a 40-cycle Slinky and a reaction-diffusion system described by the +Kolmogorov-Petrovsky-Piskunov (KPP) equation. In the Slinky case, we embed Euclidean +invariance for the energy field and induce equivariance for the force field. iMODE is able to +learn from 4 Slinky cases (of Young’s modulus 50, 60, 70, and 80 GPa, dropping under gravity +from a horizontal initial configuration with both ends fixed) and then quickly generalize (with +2 adaptation steps) to an unseen Slinky (of Young’s modulus 56 GPa) under unseen initial +and boundary conditions. In the KPP equation case, iMODE is able to learn the reaction +term with different reaction strength coefficients in 5 adaptation steps under Neumann +boundary conditions and directly generalize to unseen Dirichlet boundary conditions. Refer +to SM [27] for details. +We have presented the iMODE method, i.e., interpretable meta NODE. As a major +difference from existing NN-based methods, iMODE learns meta-knowledge on a family of +dynamical systems, specifically the functional variation of the derivative (force) field. It +constructs a parametrized functional form of the derivative field with a shared NN across +system instances and latent adaptation parameters adapted for different instances. The +NN and adaptation parameters are learned from the difference between the ground truth +and the solution calculated by an appropriate ODE solver. We have validated with various +examples the generalizability, interpretability, and fast adaptation ability of the iMODE +method. iMODE opens a potential avenue for modeling and real-time control problems +where the underlying systems are rapidly changing. +9 + +∗ V.R.: vwani@ee.ucla.edu, M.K.J.: khalidjm@seas.ucla.edu +[1] J. Sprakel, S. B. Lindstr¨om, T. E. Kodger, and D. A. Weitz, Stress enhancement in the delayed +yielding of colloidal gels, Physical Review Letters 106, 248303 (2011). +[2] M. K. Jawed, P. Dieleman, B. Audoly, and P. M. Reis, Untangling the mechanics and topology +in the frictional response of long overhand elastic knots, Physical Review Letters 115, 118302 +(2015). +[3] R. Alert, A. Mart´ınez-Calvo, and S. S. Datta, Cellular sensing governs the stability of chemo- +tactic fronts, Physical Review Letters 128, 148101 (2022). +[4] G. E. Karniadakis, I. G. Kevrekidis, L. Lu, P. Perdikaris, S. Wang, and L. Yang, Physics- +informed machine learning, Nature Reviews Physics , 1ˆa€“19 (2021). +[5] M. Raissi, Deep hidden physics models: Deep learning of nonlinear partial differential equations, +Journal of Machine Learning Research 19, 1 (2018). +[6] R. T. Q. Chen, Y. Rubanova, J. Bettencourt, and D. Duvenaud, Neural ordinary differential +equations, Advances in Neural Information Processing Systems (2018). +[7] S. L. Brunton, J. L. Proctor, and J. N. Kutz, Discovering governing equations from data by +sparse identification of nonlinear dynamical systems, Proceedings of the National Academy of +Sciences 113, 3932 (2016). +[8] K. Champion, B. Lusch, J. N. Kutz, and S. L. Brunton, Data-driven discovery of coordinates +and governing equations, Proceedings of the National Academy of Sciences 116, 22445 (2019). +[9] B. Chen, K. Huang, S. Raghupathi, I. Chandratreya, Q. Du, and H. Lipson, Automated dis- +covery of fundamental variables hidden in experimental data, Nature Computational Science +2, 433 (2022). +[10] R. Iten, T. Metger, H. Wilming, L. d. Rio, and R. 
Renner, Discovering physical concepts with +neural networks, Physical Review Letters 124, 010508 (2020). +[11] Z. Liu and M. Tegmark, Machine learning conservation laws from trajectories, Physical Review +Letters 126, 180604 (2021). +[12] Z. Liu and M. Tegmark, Machine learning hidden symmetries, Physical Review Letters 128, +180201 (2022). +[13] K. Lee and E. J. Parish, Parameterized neural ordinary differential equations: Applications to +10 + +computational physics problems, Proceedings of the Royal Society A: Mathematical, Physical +and Engineering Sciences 477, 20210162 (2021). +[14] S. Desai, M. Mattheakis, H. Joy, P. Protopapas, and S. J. Roberts, One-shot transfer learning +of physics-informed neural networks, in ICML 2022 2nd AI for Science Workshop (2022). +[15] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, and A. Anand- +kumar, Neural operator: +Graph kernel network for partial differential equations (2020), +arXiv:2003.03485 [cs.LG]. +[16] Z. Li, N. B. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, and A. Anand- +kumar, Fourier neural operator for parametric partial differential equations, in International +Conference on Learning Representations (2020). +[17] L. Lu, P. Jin, G. Pang, Z. Zhang, and G. E. Karniadakis, Learning nonlinear operators via +deeponet based on the universal approximation theorem of operators, Nature Machine Intelli- +gence 3, 218ˆa€“229 (2021). +[18] S. Wang, H. Wang, and P. Perdikaris, Learning the solution operator of parametric partial +differential equations with physics-informed DeepONets, Science Advances 7, eabi8605 (2021). +[19] H. Wang, H. Zhao, and B. Li, Bridging multi-task learning and meta-learning: Towards +efficient training and effective adaptation, in Proceedings of the 38th International Conference +on Machine Learning (PMLR, 2021) pp. 10991–11002. +[20] C. Finn, P. Abbeel, and S. Levine, Model-agnostic meta-learning for fast adaptation of deep +networks, in International conference on machine learning (PMLR, 2017) pp. 1126–1135. +[21] A. Nichol, J. Achiam, and J. Schulman, On first-order meta-learning algorithms, CoRR +abs/1803.02999 (2018), 1803.02999. +[22] C. Finn, A. Rajeswaran, S. Kakade, and S. Levine, Online meta-learning, in Proceedings +of the 36th International Conference on Machine Learning, Proceedings of Machine Learning +Research, Vol. 97, edited by K. Chaudhuri and R. Salakhutdinov (PMLR, 2019) pp. 1920–1930. +[23] A. Rajeswaran, C. Finn, S. M. Kakade, and S. Levine, Meta-learning with implicit gradients, +in Advances in Neural Information Processing Systems, Vol. 32 (Curran Associates, Inc., 2019). +[24] A. Raghu, M. Raghu, S. Bengio, and O. Vinyals, Rapid learning or feature reuse? Towards +understanding the effectiveness of MAML, in International Conference on Learning Represen- +tations (2019). +[25] R. T. Q. Chen, B. Amos, and M. Nickel, Learning neural event functions for ordinary differ- +11 + +ential equations, International Conference on Learning Representations (2021). +[1] Q. Li, T. Wang, V. Roychowdhury, and M. Jawed, Rapidly encoding generalizable dynamics +in a Euclidean symmetric neural network, Extreme Mechanics Letters , 101925 (2022). +[27] See supplemental material for details on iMODE training, wall bouncing system, double pen- +dulum system, dimension determination of latent space, demonstration on complex systems, +and movies on diffeomorphism. +[28] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, +N. Gimelshein, L. 
Antiga, A. Desmaison, A. Kopf, E. Yang, Z. DeVito, M. Raison, A. Tejani, S. Chilamkurthy, B. Steiner, L. Fang, J. Bai, and S. Chintala, Pytorch: An imperative style, high-performance deep learning library, in Advances in Neural Information Processing Systems 32, edited by H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (Curran Associates, Inc., 2019) pp. 8024-8035.

Figure 3. (color online). (a) Comparison of iMODE test adaptation v.s. training from scratch on 50 unseen bistable system instances with randomly chosen physical parameters. iMODE demonstrates fast adaptation and good generalization within the first 5 adaptation steps. (b) The true and learned potential energy functions for the wall bouncing system. The width of the potential well increases as the adaptation parameter increases. (c) The number of top PCA components that preserve a significant portion (> 99%) of the variance gives a good estimation of the dimension of the true physical parameters. (d) The diffeomorphism constructed by NODE for the bistable system. It shows how a grid in the physical space is continuously deformed into the latent space of adaptation parameters. (e) The mean error and computation time of the Neural Gauge for 100 systems with randomly generated unseen parameters.

Supplemental Information

Testing performance of the iMODE method

Figure S1. (color online). The testing performance of (a) the pendulum system, (b) the bistable system, (c) the Van der Pol system. The solid lines are ground truth. In (a) and (b) the circles, and in (c) the dashed lines, are predictions of the corresponding iMODE models. Different colors represent different parametric systems.

Oscillating pendulum

ml²θ̈ + mgl sin(θ) = 0,  s.t.  θ(0) = θ₀, θ̇(0) = θ̇₀    (7)

The training dataset contains 5 system instances with l = [1, 3, 5, 7, 9] m. A long trajectory of 10 s is generated for each instance with initial position π/2, initial velocity 0 s⁻¹, and a time-marching stepsize of 10 ms. During training, a batch of 20 trajectories of 1 s is pulled out randomly in each epoch, so essentially the iMODE training sees 1 s trajectories with different initial conditions.

The learnt iMODE model is tested on 8 unseen system instances with l = [2, 3.5, 4, 5.1, 6, 6.9, 8, 10] m. The task adaptation is done in the same way as the training, i.e., seeing a batch of 20 randomly pulled-out trajectories of 1 s for each testing system instance. The task adaptation only takes 5 steps. Then the learnt model for each instance is used to calculate a trajectory of 5 s given an initial condition, and compared with ground truth. The results are shown in
The results are shown in +Fig. S1(a). The solid lines (the ground truth) match well with the circles (prediction). +14 + +V0 +m/s)8 +888.5 +-1V0 +(m/s).52 +0 +(m) +X0 +(m) +X54T米1.500000K1ε increasing +δ increasing +ω increasing +Figure S2. +(color online). +The three variation directions in the latent space for the physical +parameters of the Van der Pol system ϵ, δ, and ω. +Bistable oscillator +¨x + k1x + k3x3 = 0 +s.t. +x(0) = x0, ˙x(0) = ˙x0 +(8) +The training dataset contains 20 system instances, a mesh of k1 = [−0.4, −0.6, −0.8, −1.0] +and k3 = [2.0, 2.9, 3.7, 4.6, 5.0]. Trajectories of multiple initial conditions with stepsize 10 ms +and time span 10 s are generated for each instance. During training, a batch of 100 randomly +pulled-out trajectories of 1 s is used for each epoch. During testing, task adaptation takes +5 steps on previously unseen systems [k1, k3] = [−0.5, 3.1], [−0.7, 4.2], [−0.5, 4.7]. The learnt +models calculate trajectories of 5 s given an initial condition. The results are shown in +Fig. S1(b). +Van der Pol system +¨x − ϵ ˙x(1 − δx2) + ω2x = 0 +s.t. +x(0) = x0, ˙x(0) = ˙x0 +(9) +The training dataset contains 27 system instances, a mesh of ϵ = [1.0, 2.0, 3.0], δ = +[1.0, 2.0, 3.0], and ω = [0.5, 1.0, 1.5]. Trajectories of multiple initial conditions with step- +size 10 ms and time span 10 s are generated for each instance. +During training, a +batch of 100 randomly pulled-out trajectories of 1 s is used for each epoch. +During +testing, task adaptation takes 5 steps on previously unseen systems instances [ϵ, δ, ω] = +[1.2, 1.2, 2.1], [1.2, 1.8, 1.4], [2.6, 1.5, 2.5]. The learnt models calculate trajectories of 5 s given +an initial condition. The results are shown in Fig. S1(c). +15 + +1.5 +M21 +0.60.571n0.8 +1 +M11.50.5 +M1 +11.6 +.81.4 +M21.211.2 +1.41.5 +10.51.615=3.01.55=1.0 +5=2.001.81.6 +1.4 +1.2 +M211 +0.5 +M10.01.5L +211.51.53-0.5E +E +E二1.0 +2.0 +=3.0The three variation directions in the latent space η ∈ R3 for ϵ, δ, and ω are shown in +Fig. S2. +Other systems + + +(a) +(b) +Figure S3. (color online). (a) The wall bouncing system. (b) The double pendulum system. +Wall bouncing system +The governing equation for the wall bouncing system (Fig. S3(a)) is +¨x+F(x) = 0 +s.t. +x(0) = x0, ˙x(0) = ˙x0 +F(x) = +� −k(x − w), x ≥ w +0 +, |x| < w +−k(x + w), x ≤ −w +(10) +x and v are the particle position and velocity, k = 1000 N/m is a large constant to approx- +imate a stiff wall, w is the (half-)width of the potential well, as shown in Fig. S3(a). The +system has a potential energy well with the following form +E(x) = +� +� +� +0, +|x| < w +∞, +|x| ≥ w +(11) +The training dataset contains 10 system instances, with the width increasing from 0.1 m +to 1.0 m by 0.1 m. Trajectories of multiple initial conditions (initial position 0 m, and +initial velocities ranging from 0.1 m/s to 1.0 m/s) with stepsize 10 ms and time span 10 +s are generated for each instance. +During training, a batch of 100 randomly pulled-out +trajectories of 1 s is used for each epoch. +16 + +In this case the intermediate output of the NN is the energy and the force is derived by +taking the derivative of the output with respect to the input, i.e. +F = ∂E +∂x = ∂(NNθ(x; η) + NNθ(−x; η)) +∂x +(12) +The second equality takes advantage of the assumption that the energy is symmetric with +respect to x. The learning results show η ∈ R to be in perfect correlation with the width w +of the potential well (Fig. S4). 
In other words, we can control the width of the constructed +potential well of the NN, which is another way to interpret the physical meaning of the +adaptation parameter η. +1 +3 +5 +7 +9 +case No. +0 +0.25 +0.5 +0.75 +1 +Width (m) +0 +3 +6 +9 +12 +Figure S4. (color online). The true widths of the wall bouncing system and the learnt adaptation +parameters η are in perfect correlation (99.87%). +Double pendulum +The double pendulum, as shown in Fig. S3(b), has two masses m1 = m2 = 1 kg and arm +lengths L1 and L2. The governing equations are +˙θ1 = ω1 +˙θ2 = ω2 +˙ω1 = −g (2m1 + m2) sin θ1 − m2g sin (θ1 − 2θ2) − 2 sin (θ1 − θ2) m2 (ω2 +2L2 + ω2 +1L1 cos (θ1 − θ2)) +L1 (2m1 + m2 − m2 cos (2θ1 − 2θ2)) +˙ω2 = 2 sin (θ1 − θ2) (ω2 +1L1 (m1 + m2) + g (m1 + m2) cos θ1 + ω2 +2L2m2 cos (θ1 − θ2)) +L2 (2m1 + m2 − m2 cos (2θ1 − 2θ2)) +(13) +17 + +0 +0.2 +0.4 +0.6 +0.8 +0 +0.2 +0.4 +0.6 +0.8 +1 +0 +0.2 +0.4 +0.6 +0.8 +0 +0.2 +0.4 +0.6 +0.8 +1 +L1 increasing +0.8 m +0.5 m +L2 increasing +0.8 m +0.5 m +(a) +(b) +Figure S5. (color online). The learnt latent space of adaptation parameters for the double pendu- +lum system. (a) Each marked line shows systems with the same L1. (b) Each marked line shows +systems with the same L2. There are clearly two directions in the latent space (indicated by the +arrows) corresponding to the change of physical parameters L1 and L2. +The training dataset contains 16 system instances, a mesh of L1 = [0.5, 0.6, 0.7, 0.8] m and +L2 = [0.5, 0.6, 0.7, 0.8] m. Trajectories of initial locations [π/4,π/4] and initial velocities [0,0] +with stepsize 10 ms and time span 10 s are generated for each system. During training, a +batch of 100 randomly pulled-out trajectories of 1 s is used for each epoch. Task adaptation +takes 5 steps. The learnt latent space of adaptation parameters is shown in Fig. S5. It is +clear that two directions exist corresponding to the variation of physical parameters L1 and +L2. This again underlines the interpretability of η ∈ R2. +Comparison between iMODE and training from scratch +We compare the performance of iMODE adaptation to the “training from scratch” (TFS) +approach. The iMODE adaptation starts with a weight initialization trained from a train- +ing dataset. It updates the adaptation parameter η ∈ R2 on a testing dataset, which is not +included in the training dataset. The TFS approach uses the same NN architecture and hy- +perparameters as in the iMODE. The TFS NN is randomly initialized and all the weights are +updated on the same testing dataset. After training the TFS NN and adapting the iMODE +η NN using the same testing dataset, the two NNs are evaluated on an unseen evaluation +dataset. As shown in Fig. 3(a), the iMODE significantly outperforms the TFS approach in +terms of adaptation speed (v.s. training speed in the TFS approach) and evaluation accu- +18 + +0 5 +20 +40 +60 +80 +100 +Epochs +10 -8 +10 -6 +10 -4 +10 -3 +10 -2 +10 -1 +Loss +From scratch test +From scratch eval +iMODE test +iMODE eval +-1 +-0.5 +0 +0.5 +1 +x (m) +0 +0.04 +0.08 +0.12 +Energy (J) +From scratch +iMODE +True +(a) +(b) +Figure S6. (color online). (a) The training/adaptation and evaluation performance of the TFS and +iMODE NNs, given a single trajectory of the bistable system with physical parameters k1 = −1.0 +and k3 = 2.0, and initial condition x0 = 0.7 m and ˙x0 = 0 m/s. (b) The iMODE learns the correct +double-well potential energy function while the TFS approach learns nothing due to data scarcity. +racy. 
This means that the iMODE approach can learn the dynamics of an unseen system +more rapidly and predict future events more accurately than a TFS NN. This observation is +pronounced in the following case: we feed these two NNs a single trajectory of the bistable +system with physical parameters k1 = −1.0 and k3 = 2.0, and initial condition x0 = 0.7 m +and ˙x0 = 0 m/s. After training/adaptation, we evaluate the TFS and iMODE NNs on tra- +jectories of the same system but with differential initial conditions. The training/adaptation +and evaluation curves are shown in Fig. S6(a). The iMODE outperforms the TFS approach +in both training/adaptation and evaluation accuracy. The learnt energy functions of both +approaches are compared in Fig. S6(b). The energy function of the TFS NN is totally in- +correct due to the data scarcity. Under this specific initial condition, the bistable system is +only oscillating intra-well. So the information contained in the trajectory is insufficient to +depict the entire potential energy surface. Meanwhile the iMODE NN learns an accurate +double-potential-well function from the same data because appropriate prior knowledge on +the energy functions of bistable systems (i.e. double-well) is already embedded in its weight +initialization. +19 + +Dimension determination of physical parameters with PCA +The workflow of using PCA to determine the optimal dimension d of the adaptation +parameters η for a parametric system is: (1) Make a rough guess ˜d on the dimension, then +run the iMODE algorithm on the trajectories of Ns systems; (2) Form a matrix with the +results M = [η1, η2, . . . , ηNs]; (3) Perform PCA on M. A significant portion of variance (e.g. +99%) will be preserved in the first ˆd dimensions, an estimation for d; (4) repeat the process +with different initial guesses ˜d. The optimal dimension is more credible when different ˜d +results in the same ˆd. +1 +2 +3 +4 +5 +Order +0.6 +0.7 +0.8 +0.9 +1 +Ratio +Pendulum +1 +2 +3 +4 +5 +Order +0.6 +0.7 +0.8 +0.9 +1 +Wall bouncing +1 +2 +3 +4 +5 +Order +0.6 +0.7 +0.8 +0.9 +1 +Bistable +1 +2 +3 +4 +5 +Order +0.6 +0.7 +0.8 +0.9 +1 +Double pendulum +1 +2 +3 +4 +5 +Order +0.6 +0.7 +0.8 +0.9 +1 +Van der Pol +Figure S7. (color online). PCA can be used to determine the optimal dimensions of the adaptation +parameters η in the studied systems. These optimal dimensions prove to equal the true dimension +dφ of the physical parameters φ of the systems. The red dashed line indicates 0.99. +The PCA determination results of all systems are shown in Fig. S7. The red dashed lines +mark the 99% variance preservation. Dotted lines in each case mean that the initial guess +˜d is the dimension of real physical parameters dφ plus 2. For example, in the Van der Pol +system, the dotted line means that the initial guess ˜d = 5. After the PCA, if we preserve +4 or 3 principal components, the variance energy is still preserved by more than 99%. If +we further reduce the number of preserved principal components to 2 or 1, we see a sudden +drop (to below the 99% threshold), which indicates the optimal dimension to be 3. The ˜d for +dashed-dotted and solid lines are the dimension of real physical parameters dφ plus 1 and 0 +respectively. With different ˜d, we can repeatedly confirm the optimal dimension of η, to be +3 in the case of Van der Pol system (which is the true dimension of physical parameters). +For other systems, the workflow is the same. 
Neural Gauge diffeomorphism

As suggested by the preceding PCA analysis, the adaptation parameters {ηi}, i = 1, . . . , Ns, adapted to the system instances of a family of dynamical systems occupy a dφ-dimensional manifold, even if the latent space they reside in is dη-dimensional with dη ≥ dφ. Therefore, a diffeomorphism can be established that maps η in the latent space to the corresponding physical parameters {φi}, even if their dimensions do not match, assuming dη ≥ dφ. Practically, the neural ODE modelling such a diffeomorphism can be defined as dz(t)/dt = g_ξ(z), such that for i = 1, . . . , Ns, starting from a given point in the latent space, z(0) = ηi, the state at t = 1,

z(1) = \left[\phi_i^T \; 0 \cdots 0\right]^T,

is the concatenation of the corresponding physical parameters and dη − dφ padding zeros.

Complex cases

Slinky: the Euclidean symmetric neural network

Figure S8. (color online). The Euclidean invariance of the energy and the induced equivariance of the force of the NN used in the Slinky system case. All Euclidean-transformed configurations have the same energy as the original configuration. The elastic forces on the middle bars are Euclidean-transformed accordingly.

The NN used in the Slinky case follows the Euclidean symmetric neural network (ESNN) [1] architecture. The Slinky is decomposed into 40 consecutive triplets, i.e., 2D representations of 3 adjacent cycles. We denote the coordinates of the ith triplet as \xi_i = \left[x_{i-1}^T, x_i^T, x_{i+1}^T\right]^T \in \mathbb{R}^9, where x_i \in \mathbb{R}^3 contains the coordinates of the ith bar: the x and y coordinates of the bar center and the inclination angle of the bar. The potential energy associated with the middle bar of a triplet is a function only of the coordinates of the 3 bars of the same triplet (and of the adaptation parameters), i.e.,

E_i = E_i(\xi_i; \eta)    (14)

In the following we omit the subscript i for brevity. The force F induced by E is

F = \frac{\partial E}{\partial x_i} = \frac{\partial E(z; \eta)}{\partial x_i} = \frac{\partial E(\xi; \eta)}{\partial x_i}    (15)

where z \in \mathbb{R}^6 denotes the relative coordinates between the bars of the ith triplet. We enforce Euclidean invariance, i.e., translational, rotational, and chiral invariance, of E with respect to ξ by giving E(z) the following form, the ESNN:

E(z; \eta) = \mathrm{NN}_\theta(z; \eta) + \mathrm{NN}_\theta(R_x(z); \eta) + \mathrm{NN}_\theta(R_y(z); \eta) + \mathrm{NN}_\theta(R_x(R_y(z)); \eta)    (16)

where R_x(·) and R_y(·) denote reflection with respect to the x and y axes. Note that

R_x(R_x(\cdot)) = I(\cdot), \quad R_y(R_y(\cdot)) = I(\cdot), \quad R_x(R_y(\cdot)) = R_y(R_x(\cdot))    (17)

where I(·) is the identity operation. It is then straightforward to prove the chiral invariance of E, i.e.,

E(z; \eta) = E(R_x(z); \eta) = E(R_y(z); \eta) = E(R_x(R_y(z)); \eta)    (18)

From Eq. (15), F is therefore equivariant to rigid-body and chiral transformations of ξ, as shown in Fig. S8, including translation, rotation, and reflection, regardless of η. The ESNN is applied to each triplet in the Slinky to calculate the elastic force acting on each bar. The assembled force vector is used to update the system state inside the NODE framework, and the difference between the true and predicted trajectories is used to update the ESNN weights θ. After training and performing trajectory predictions in 2D, a geometric method can be used to reconstruct the 3D Slinky configurations [1]. See [1] for more implementation details.

The training dataset contains 4 Slinkies of different Young's moduli (50, 60, 70, and 80 GPa). The Slinkies are clamped at both ends and drop freely under gravity from a horizontal initial configuration.
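A minimal sketch of the symmetrization in Eq. (16), assuming PyTorch. How R_x and R_y act on the six relative coordinates depends on how those coordinates are encoded, so the sign patterns below are placeholders rather than the actual operators of [1]; the hidden widths are likewise illustrative.

import torch
import torch.nn as nn

class ESNNEnergy(nn.Module):
    # Energy of one triplet, symmetrized over the two reflections as in Eq. (16).
    def __init__(self, dim_z=6, dim_eta=1, width=32):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim_z + dim_eta, width), nn.Softplus(),
            nn.Linear(width, width), nn.Softplus(),
            nn.Linear(width, 1),
        )
        # Placeholder sign patterns standing in for R_x and R_y acting on the
        # relative coordinates z (an assumption; the real operators follow [1]).
        self.register_buffer("rx", torch.tensor([1., -1., -1., 1., -1., -1.]))
        self.register_buffer("ry", torch.tensor([-1., 1., -1., -1., 1., -1.]))

    def forward(self, z, eta):
        total = 0.0
        for image in (z, z * self.rx, z * self.ry, z * self.rx * self.ry):
            total = total + self.net(torch.cat([image, eta], dim=-1))
        return total  # invariant under R_x, R_y, and their composition by construction

The force on the middle bar then follows Eq. (15) by differentiating this scalar output with respect to the bar coordinates via torch.autograd.grad.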
Two inner steps are taken to update η ∈ R for each Slinky. Note that whenever η is updated, the NN still preserves energy invariance and force equivariance with respect to the coordinates of the Slinky. After training the NN, we perform task adaptation (2 steps) on an unseen Slinky of Young's modulus 56 GPa and observe a good fit (Fig. S9(a)). The resulting NN is then directly applied, without any modification, to computation under an unseen boundary condition and Slinky orientation: the bottom end is free and the Slinky drops under gravity from a vertical initial configuration (Fig. S9(b)). We can achieve this because the model-agnostic nature of the iMODE method allows us to embed the Euclidean symmetries into the NN.

Figure S9. (color online). (a) The testing performance of the iMODE model on an unseen Slinky (of an unseen Young's modulus) with the same boundary condition as the training dataset. Top and bottom rows are the ground truth and the iMODE model prediction at 0.28, 0.47, 0.65, 0.83 s (left to right). (b) The testing performance of the iMODE model on unseen initial and boundary conditions. Top and bottom rows are the ground truth and the iMODE model prediction at 0.15, 0.32, 0.48, 0.65 s (left to right).

Kolmogorov-Petrovsky-Piskunov (KPP) equation

To solve the KPP equation, we discretize the spatial domain [0, 1] into 20 segments, so the partial differential equation (here x denotes the spatial coordinate)

\frac{\partial u}{\partial t} = D \frac{\partial^2 u}{\partial x^2} + r u (1 - u)    (19)

is represented by an ordinary differential equation system containing 21 variables. The diffusion term is approximated by a 2nd-order central difference and the diffusivity is assumed known. The meta-learning is performed to learn the reaction term, with different reaction strength coefficients r, without knowing its mathematical form. The training dataset contains 5 systems with r = 0.01, 0.02, 0.03, 0.04, 0.05. The Neumann boundary condition u′(0) = u′(1) = 0 (where ′ denotes the derivative with respect to x) is used across the training dataset. The iMODE task adaptation takes 5 iterations. The training results for r = 0.01 and 0.05 are shown in Fig. S10(a). The iMODE NN is then adapted on data from an unseen system instance with r = 0.034, and the resulting NN is directly applied to computation with unseen initial and boundary conditions (Dirichlet type, u(0) = u(1) = 1). The results of the latter are shown in Fig. S10(b), and good agreement is observed. This again validates the capability of the iMODE algorithm to rapidly adapt to unseen complex parametric systems and to predict accurately under initial and boundary conditions different from those in the training dataset.

Another testing result for the KPP system is shown in Fig. S10(c). This test has the same type of boundary condition (u′(0) = u′(1) = 0) as the training dataset but an unseen initial condition. The prediction (right) matches the ground truth (left) well.
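As a concrete picture of the discretization just described, the following is a minimal method-of-lines sketch assuming NumPy; the grid and central difference follow the text, while the mirrored ghost-node treatment of the Neumann condition is one standard choice among several.

import numpy as np

def kpp_rhs(u, D, r, dx=1.0 / 20):
    # u: 21 nodal values on [0, 1]; returns du/dt for the semi-discrete KPP system, Eq. (19).
    lap = np.empty_like(u)
    lap[1:-1] = (u[2:] - 2 * u[1:-1] + u[:-2]) / dx**2      # 2nd-order central difference
    lap[0] = 2 * (u[1] - u[0]) / dx**2                      # Neumann u'(0) = 0 via a mirrored ghost node
    lap[-1] = 2 * (u[-2] - u[-1]) / dx**2                   # Neumann u'(1) = 0
    return D * lap + r * u * (1 - u)                        # known diffusion plus reaction term

In the iMODE setting, the reaction term r u(1 − u) is what the network replaces and learns per node from [u, η], while the known diffusion term is added outside the network.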
Neural network architecture

Throughout this letter, we use a DenseNet-like architecture [2] for our neural networks (NNs), in which shortcut pathways connect a layer to all of its previous layers. The network takes in the input and first increases the feature dimension to 32 with a fully-connected (FC) layer. The feature is then passed through FC layers with Softplus activation, and a new feature with an increased dimension is formed by concatenating the previous feature with the FC layer output, i.e.,

f_0 = \mathrm{FC}(\mathrm{Input}) \in \mathbb{R}^{32}
f_i = \begin{bmatrix} \mathrm{FC}(f_{i-1}) \in \mathbb{R}^{32} \\ f_{i-1} \end{bmatrix}, \quad i = 1, 2, \ldots, 5
\mathrm{Output} = \mathrm{FC}(f_5)
(20)

where f_i is the feature map of the ith layer. After passing through 5 densely connected layers, the feature dimension is increased to 192. This feature is then passed through an FC layer with no activation to produce the final output.

For the pendulum, bistable, wall bouncing, and Slinky systems, the NN input is the vector concatenating the system position x and the adaptation parameter η, i.e., [x^T, η^T]^T. The output is a scalar, the energy of the system, and the force vector is calculated by back-propagating the NN output with respect to x. For the Van der Pol system, the input is the vector concatenating the system state y and η, i.e., [y^T, η^T]^T, and the output is the force vector. For the KPP system, the input is [u, η] and the output is the reaction forcing term.

Supplementary movies

Movie S1. The diffeomorphism for the bistable system. The data points are transformed from the physical space (mean subtracted) to the latent space of adaptation parameters (mean subtracted). The right subplot is an enlarged view of the left plot.

Movie S2. The diffeomorphism for the Van der Pol system. The data points are transformed from the physical space (mean subtracted) to the latent space of adaptation parameters (mean subtracted). The right subplot is an enlarged view of the left plot.

∗ V.R.: vwani@ee.ucla.edu, M.K.J.: khalidjm@seas.ucla.edu

[1] Q. Li, T. Wang, V. Roychowdhury, M. Jawed, Rapidly encoding generalizable dynamics in a Euclidean symmetric neural network, Extreme Mechanics Letters (2022) 101925.
[2] G. Huang, Z. Liu, L. Van Der Maaten, K. Q. Weinberger, Densely connected convolutional networks, in: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 2261-2269.

Figure S10. (color online). (a) The training results of the iMODE algorithm on the KPP system for r = 0.01 (left) and r = 0.05 (right). Solid lines are ground truth; dashed lines are iMODE predictions. The arrow indicates the time marching of u. (b) The iMODE testing results on an unseen system (r = 0.034) with an unseen boundary condition. The ground truth (left) matches the iMODE prediction (right) well. (c) The iMODE testing results (r = 0.034) on an initial condition unseen in the training dataset. The ground truth (left) matches the iMODE prediction (right) well.

Figure S11. (color online). The DenseNet-like structure.
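A minimal sketch of this DenseNet-like structure, Eq. (20), assuming PyTorch; applying the Softplus to each new 32-dimensional FC output before concatenation is our reading of the text, and the class and argument names are illustrative.

import torch
import torch.nn as nn

class DenseMLP(nn.Module):
    # DenseNet-like MLP of Eq. (20): an input FC layer to 32 features,
    # 5 densely connected FC + Softplus layers, and a final linear FC layer.
    def __init__(self, in_dim, out_dim, width=32, n_dense=5):
        super().__init__()
        self.inp = nn.Linear(in_dim, width)                        # f0 = FC(Input)
        self.dense = nn.ModuleList(
            [nn.Linear(width * (i + 1), width) for i in range(n_dense)]
        )
        self.act = nn.Softplus()
        self.out = nn.Linear(width * (n_dense + 1), out_dim)       # 192 features in, no activation

    def forward(self, x):
        f = self.inp(x)
        for layer in self.dense:
            f = torch.cat([self.act(layer(f)), f], dim=-1)         # f_i = [FC(f_{i-1}); f_{i-1}]
        return self.out(f)

For the energy-based systems, for instance, the input would be the concatenation [x^T, η^T]^T with out_dim = 1, and the force would be obtained by differentiating the scalar output with respect to x via autograd.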
+28 + diff --git a/wtAzT4oBgHgl3EQfCPrg/content/tmp_files/load_file.txt b/wtAzT4oBgHgl3EQfCPrg/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..38b3f0a8be8f3e640df9fdeadc654c89fb2bacaa --- /dev/null +++ b/wtAzT4oBgHgl3EQfCPrg/content/tmp_files/load_file.txt @@ -0,0 +1,1033 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf,len=1032 +page_content='APS/123-QED Meta-learning generalizable dynamics from trajectories Qiaofeng Li1,2,3, Tianyi Wang2, Vwani Roychowdhury2,∗, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Jawed1,∗ 1Dept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' of Mechanical and Aerospace Engineering, University of California, Los Angeles, CA 90095, USA 2Dept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' of Electrical and Computer Engineering, University of California, Los Angeles, CA 90095, USA 3Dept.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' of Mechanical Engineering, Massachusetts Institute of Technology, Cambridge, MA 02139, USA 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='00957v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='LG] 3 Jan 2023 Abstract We present the interpretable meta neural ordinary differential equation (iMODE) method to rapidly learn generalizable (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' not parameter-specific) dynamics from trajectories of multiple dynamical systems that vary in their physical parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The iMODE method learns meta- knowledge, the functional variations of the force field of dynamical system instances without know- ing the physical parameters, by adopting a bi-level optimization framework: an outer level cap- turing the common force field form among studied dynamical system instances and an inner level adapting to individual system instances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' A priori physical knowledge can be conveniently em- bedded in the neural network architecture as inductive bias, such as conservative force field and Euclidean symmetry.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' With the learned meta-knowledge, iMODE can model an unseen system within seconds, and inversely reveal knowledge on the physical parameters of a system, or as a Neural Gauge to “measure” the physical parameters of an unseen system with observed trajec- tories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' We test the validity of the iMODE method on bistable, double pendulum, Van der Pol, Slinky, and reaction-diffusion systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Building predictive models of dynamical systems is a central challenge across diverse disciplines of science and engineering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Traditionally, this has been achieved by first manu- ally deriving the governing equations with carefully chosen state variables and then fitting the undetermined physical parameters using observed data, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=', [1–3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In order to avoid the painstaking formulation of analytical equations, researchers have recently leveraged ad- vances in machine learning and the data-fitting power of neural networks (NNs) to make the modeling process both automatic and more expressive [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' This is achieved by either adopting the conventional physics-based approach as a starting point and then replacing various components with data-driven modules [5, 6], or directly learning discrete dynamics using autoregressive models from high-dimensional observations [7–9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' These works, while promising, need to fit dedicated models separately for different system instances with dif- ferent parameters, which limits a model’s applicability to one specific instance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In this letter, our goal is to learn meta-knowledge, the form of dynamics that is unre- stricted to specific physical parameters or initial/boundary conditions, on dynamical systems to reveal physical insights [10–12] and to significantly improve the generalization ability of data-driven models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Specifically, we learn the shared dynamics form from the trajectories 2 generated by a series of dynamical system instances in spite of their diversified behaviors in data, without knowing the system parameters.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' This separates our work from Refs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' [13, 14] and Neural Operators [15–18], in which true parameters should be provided.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' This goal aligns with that of multi-task meta-learning [19], which aims to leverage the similarities between different tasks to enable better generalization and efficient adaptation to unseen tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' We propose an efficient and interpretable method to model a family of dynamical systems using their observed trajectories, by combining gradient-based meta-learning (GBML) [20– 24] with neural ordinary differential equations (NODE) [1, 6, 25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In recognizing that the systems have shared dynamics form and varying physical parameters, we separate the model parameters into two parts: the shared parameters that capture the shared form of dynamics, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' the meta-knowledge, and the adaptation parameters that account for variations across system instances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The method generalizes well on unseen systems from the same family, and the adaptation parameters show good interpretability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The intrinsic dimension of the varying system parameters can be estimated by analyzing the adaptation parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Given ground truth of the system parameters, simple correspondence can be established between the adaptation parameters and actual physical parameters through diffeomorphism, which can be utilized as a “Neural Gauge” to measure properties of new systems through observed trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' We name our method interpretable meta neural ODE (iMODE).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In a general autonomous second-order system, the state of the system y contains the position (generalized coordinates) x and the velocity ˙x.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The dynamics of the second-order system is expressed by ˙y = � � ˙x ¨x � � = � � ˙x M−1Fφ(y) � � , where y = � �x ˙x � � (1) where Fφ is the force vector containing all the internal and external forces, and M is the mass matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' With a set of physical parameters φ, the force function F(·) dictates the dynamics of the system, which determines a unique trajectory y(t) given an initial condition y(t0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In the remainder of the letter, without loss of generality, mass is normalized to an identity matrix, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=', M = I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Trajectories are collected from multiple system instances into a dataset D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Consider Ns instances that share the dynamics form Fφ(·), but have distinct physical parameters, {φ1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' , φNs} respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' From each system instance, Ntr trajectories are observed, each containing observations across T time steps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In summary, D = � {yi,j(tk)}T k=0|i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' , Ns, j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' , Ntr � .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 3 2 0 2 10 5 0 5 10 2 0 2 7 3 0 3 7 2 0 2 4 2 0 2 4 True Pred Force Field v x Trajectories Neural ODE v x v x v x v x v x (a) (b) (c) (d) 2 1 0 1 2 2 1 0 1 2 2 1 0 1 2 2 1 0 1 2 2 1 0 1 2 2 1 0 1 2 2 2 0 2 4 0 2 4 2 2 0 3 7 0 3 7 2 2 0 5 10 0 5 10 Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (color online).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (a) In iMODE, a neural module Fθ parameterized by θ takes the concatenation of system state y and the adaptation parameters η and generates the estimated force as output.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (b) The bi-level iteration process in the iMODE method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The NN weights θ are shared across system instances while η is adapted for each instance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The meta gradient w.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' θ aggregates the gradients evaluated with instance-adapted η.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (c) Examples of estimated force field fθ(·;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' η) for Van der Pol system instances that differ in their ϵ parameter (in ascending order from top to bottom).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The estimation quality is further evaluated through the trajectories generated by the fields as shown in (d).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (d) The estimated force field can be used to predict system trajectories for unseen initial conditions through integration (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (2)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The signature limit cycles of Van der Pol systems are faithfully reproduced.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The data-driven model is trained on D, knowing which trajectories are from the same system instance (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' given both the index i and j of trajectories), but is not given the knowledge of {φi}Ns i=1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Take the pendulum system as an example.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' An instance is a pendulum with a specific arm length (since the inertia is normalized), therefore φ includes only the arm length.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' A trajectory contains the location and speed of the pendulum during a time period.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In our framework, a neural network f θ(y;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' η) (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 1(a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' See Supplemental Material (SM) [27] for detailed description) replaces Fφ(y) in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (1) to approximate the observed tra- jectories, where η is adapted to each system instance such that with a certain ηi, f θ(y;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' ηi) 4 1-2 2-02000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='90.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='310.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5-2 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='7-0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='901-2 2-0200.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='320.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='9approximates the force function of the ith system instance Fφi(y).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' After training, η becomes a proxy for the physical parameters φ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' θ is the model parameters that capture the functional form of dynamics shared across system instances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The predicted trajectory starting from an initial condition y0 is given by integration (the 5th-order Dormand-Prince-Shampine solver is used throughout this letter to compute integrals) �y(t, y0, θ, η) = y0 + � t t0 f θ (�y(τ);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' η) d τ (2) For brevity, we denote the trajectory yi,j(t) as yi,j, the corresponding prediction �y(t, yi,j(t0), θ, η) as �yi,j(θ, η), and use ∥ yi,j − �yi,j(θ, η) ∥2 to denote �T k=0 � yi,j(tk) − �y(tk, yi,j(t0), θ, η) �2, the squared difference between yi,j and �yi,j(θ, η) across all time steps.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The goal of the modeling is formulated as a bi-level optimization (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 1(b)), outer: min θ �L(θ) = 1 Ns Ns � i=1 Li(θ, η(m) i ), where (3) Li(θ, ζ) = 1 NtrT Ntr � j=1 ∥ yi,j − �yi,j(θ, ζ) ∥2, (4) inner: η(l+1) i = η(l) i − α∇ηLi(θ, η(l) i ), η(0) i = η (5) where the inner-level involves an m-step gradient descent adapting η for each instance, while the outer-level finds the optimal initialization for θ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' α is the inner-level stepsize and η(m) i is the adaptation parameters for the ith system instance after m steps of adaptation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' For short, we denote such ith adaptation result as ηi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Note that ηi depends on both θ and η as shown in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' To avoid higher-order derivatives, we simplify such dependency following the first-order Model Agnostic Meta-Learning (first-order MAML) [20] and use the outer-level step as θ ← θ − β Ns � i ∇θLi(θ, ηi), (assuming that ∂ηi ∂θ = 0) (6) where β is the outer-level stepsize.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' At both the inner-level and outer-level, the gradient calculation for functions involving integrals is enabled by NODE [1, 6, 25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' As shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 1(c), f θ(·;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' η) specifies a force field that morphs as η changes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Note that m is normally quite small (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 5), so given trajectories of a previously unseen system, η can be efficiently updated with few gradient steps, adapting the NN to specify a force field explaining behaviors of the new system, which is one order-of-magnitude faster compared 5 to training from scratch (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 3(a)).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Trajectories with arbitrary initial conditions can be inferenced based on the force field (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 1(d)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 2 1 0 1 2 5 2 0 2 5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='3 1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 0 2 4 6 8 10 1/L (m-1) 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 (a) (c) (d) (b) (e) (f) Force field evolution Force field evolution k3 increase k1 increase ω increase 5 2 0 2 5-2 1 0 2 1 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='3 x (m) 1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='7 -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 0 2 4 6 8 10 η 1/L (m-1) v (m/s) 0x (m) v (m/s) 0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 1 1 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 η1 η2 x (m) v (m/s) η1 4 2 0 2 5 5 0 η2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 1 0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='8 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='4 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 1 η3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 1 Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (color online).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (a) The meta-learning results for the pendulum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The iMODE trajectory prediction (circles) with different arm lengths (different colors) match those of the ground truth (solid lines).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (b) The learned η is in good correlation with the effective stiffness of different pendulums (1/L).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (c) The predicted trajectories (circles) match those of ground truth (solid lines) with different initial conditions (black stars) and different system parameters (different colors) for the bistable system.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (d) Two principal axes can be identified from the latent space of the learned η, each regarding the variation of one physical parameter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (e) Similar to (c) but for the Van der Pol system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' (f) The principal axis regarding to the variation of ω for the Van der Pol system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' First we validate the modeling capability of the iMODE algorithm on 3 cases: oscillating pendulum, bistable oscillator, and Van der Pol system (see SM [27] for detailed description).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The oscillating pendulum has 1 physical parameter, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' the arm length (rotational inertia normalized).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 2(a) shows that the predicted trajectories using task-adapted NNs match 6 /s) n000004 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 M211 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 M10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='51e 0000001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5=1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='0 =1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='51=0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 14 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 M211 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 M10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='511.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5=1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='0 =1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5=0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 14 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='2 M211 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5 M10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='511.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5=1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='0 =1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5=0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5900 880080oe-10 m(m/s)-50 2m5024**米1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='5¥01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='6 1the ground truth of each system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 2(b) shows that the learned η correlates well with the effective stiffness of the pendulum, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 1/L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Effectively η acts as a proxy of the true arm length and can be used to infer such parameters of unseen systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The bistable system has a potential energy function controlled by 2 parameters k1 and k3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Its potential energy has two local minima, or potential wells.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' When the initial conditions vary, the bistable system can oscillate intra-well or inter-well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 2(c) shows that the task adapted trajectories (m = 5) match the ground truth well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 2(d) shows that the identified η ∈ R2 has two principal axes, along which k1 and k3 increases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' As mentioned, η is effectively a proxy for k1 and k3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Later we will show that the mapping from η to φ = [k1, k3] can be constructed as a diffeomorphism with NODE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The Van der Pol system has 3 physical parameters φ = [ϵ, δ, ω].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' It exhibits limit cycles due to the negative damping for small oscillation amplitudes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 2(e) shows that the evolution of limit cycles due to the change of physical parameters is well predicted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Three principal axes can be found for the identified η.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The one for ω is shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 2(f) (see SM [27] for the other two).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Again, the mapping from η to φ = [ϵ, δ, ω] can be constructed as a diffeomorphism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The fast adaptation of iMODE is demonstrated with the bistable systems in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 3(a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' The iMODE is able to adjust the adaptation parameters in 5 steps to learn the dynamics of unseen system instances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Training the same network from scratch (random initialization) on the same test dataset requires much more epochs to achieve a comparable accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' When evaluated on trajectories with unseen initial conditions, the performance of iMODE-adapted models outperforms that of the model trained from scratch by several orders of magnitude, showing superior generalization ability with limited data (see SM [27] for a more disparate comparison when data is scarce).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Second, we demonstrate the combination of the iMODE algorithm with certain physics priors for efficient modeling of more complicated systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Since iMODE does not assume specific architecture of f θ, a wide range of neural network architectures can be adopted to embed appropriate inductive biases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' For example, in bistable and the following wall bouncing and Slinky systems, the assumption of conservative force is introduced, where the system dynamics is determined by a potential energy function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' Accordingly we take a specific form for the neural force estimator f θ(x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' η) = ∂Eθ(x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' η)/∂x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' That is, the NN first outputs an energy field and then induces the force field from the energy field (using auto-differentiation 7 [28]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' In this way, iMODE enables the fast adaptation of not only the force field but also the potential energy field for the parametric systems.' 
The learned potential energy functions are shown in Fig. 3(b). The wall bouncing system has a potential energy well that is not a linear function of the well's (half-)width w or the particle position x (see SM [27]). Nevertheless, iMODE is still able to approximate the discontinuous energy function. η correlates well with the true width w, i.e., we can control the width of the potential energy well by tuning η (see SM [27]). The intrinsic dimension dφ of the physical parameters φ can be estimated by applying Principal Component Analysis (PCA) to the collection of η vectors, each adapted to one of the system instances. Using an "elbow" method on the cumulative explained-variance-ratio curve of the PCA result, the number of principal components that explain most of the variance corresponds well with dφ, as long as dη ≥ dφ, where dη is the dimension chosen for η. The PCA results on the pendulum, bistable, and Van der Pol systems are shown in Fig. 3(c) (see SM [27] for the results on the other systems). Taking the Van der Pol system as an example, dη is respectively 3, 4, or 5 for the three curves with triangle markers. In all three cases, the first three principal components explain more than 99% of the variance, and the "elbow" appears at 3, which corresponds well with the fact that dφ = 3 for the Van der Pol system. Neural Gauge: Without labels for the physical parameters, iMODE develops a latent space of adaptation parameters accounting for the variations in dynamics among system instances.
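The following NumPy sketch shows this estimate in code: PCA on the collected η vectors and an elbow read off the cumulative explained-variance curve. The η matrix here is a random placeholder, and the 99% threshold is simply one way of locating the elbow, used only for illustration.

import numpy as np

etas = np.random.randn(27, 5)                 # N_s adapted eta vectors, d_eta = 5 (placeholder)
centered = etas - etas.mean(axis=0)
_, s, _ = np.linalg.svd(centered, full_matrices=False)
explained = s**2 / np.sum(s**2)               # explained variance ratio per component
cumulative = np.cumsum(explained)
d_phi_est = int(np.argmax(cumulative > 0.99)) + 1   # smallest k explaining > 99% of the variance
print(cumulative, d_phi_est)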
Given the physical parameter labels of the system instances in the training data, a mapping between the space of the physical parameters and the latent space can be established, so that the corresponding physical parameters can be estimated for any point in the latent space. iMODE can therefore be exploited as a "Neural Gauge" to identify the physical parameters of unseen system instances, and the establishment of such mappings can be seen as a calibration process. We propose to construct such mappings as diffeomorphisms, which can be learned with a neural ODE dz(t)/dt = gξ(z), such that, starting from a given point in the latent space, z(0) = ηi, the state z at t = 1 gives the corresponding physical parameters, z(1) = φi, i = 1, . . . , Ns. For simplicity, the dimension of the latent space and that of the physical parameter space are assumed to match (dη = dφ); see SM [27] for a more general treatment. gξ is a NN whose weights are optimized by ξ = arg minξ Σi ∥zi(1) − φi∥₂². Fig. 3(d) shows the learned diffeomorphism for the bistable system. The diffeomorphism establishes a bijection between the physical space and the latent space, so that a grid in the physical parameter space can be continuously transformed into the adaptation parameter space (see SM [27]). The visualization highlights the advantages of the diffeomorphism mapping: (1) the transformation is smooth, so the local geometric structure is preserved; (2) the invertible transformation allows a better interpretation of the latent space compared to degenerate ones.
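A sketch of this calibration step is given below, assuming dη = dφ = 2 and using a fixed-step Runge-Kutta integration of the learned vector field gξ from t = 0 to t = 1. The network size, step count, optimizer settings, and the placeholder (η, φ) pairs are all assumptions, not the authors' configuration.

import torch
import torch.nn as nn

g = nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 2))   # vector field g_xi

def integrate(z, steps=20):
    # RK4 integration of dz/dt = g(z) over t in [0, 1]
    h = 1.0 / steps
    for _ in range(steps):
        f1 = g(z)
        f2 = g(z + 0.5 * h * f1)
        f3 = g(z + 0.5 * h * f2)
        f4 = g(z + h * f3)
        z = z + (h / 6.0) * (f1 + 2 * f2 + 2 * f3 + f4)
    return z                                                        # z(1)

eta = torch.randn(20, 2)      # adapted latent vectors eta_i (placeholders)
phi = torch.randn(20, 2)      # labelled physical parameters phi_i (placeholders)
opt = torch.optim.Adam(g.parameters(), lr=1e-3)
for _ in range(2000):
    opt.zero_grad()
    loss = ((integrate(eta) - phi) ** 2).sum(dim=-1).mean()         # sum_i ||z_i(1) - phi_i||^2
    loss.backward()
    opt.step()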
After constructing the diffeomorphism, we test the physical parameter identification performance on 100 randomly selected unseen instances (with random physical parameters). The identification error and time cost are shown in Fig. 3(e) for the pendulum, bistable, and Van der Pol systems. The end-to-end identification, starting from data feeding, normally takes around 2 seconds. Complex systems: We further demonstrate that iMODE applies to complex systems with two examples: a 40-cycle Slinky and a reaction-diffusion system described by the Kolmogorov-Petrovsky-Piskunov (KPP) equation. In the Slinky case, we embed Euclidean invariance for the energy field and induce equivariance for the force field. iMODE is able to learn from 4 Slinky cases (of Young's modulus 50, 60, 70, and 80 GPa, dropping under gravity from a horizontal initial configuration with both ends fixed) and then quickly generalize (with 2 adaptation steps) to an unseen Slinky (of Young's modulus 56 GPa) under unseen initial and boundary conditions. In the KPP equation case, iMODE is able to learn the reaction term with different reaction strength coefficients in 5 adaptation steps under Neumann boundary conditions and directly generalize to unseen Dirichlet boundary conditions. Refer to SM [27] for details. We have presented the iMODE method, i.e., interpretable meta NODE. As a major difference from existing NN-based methods, iMODE learns meta-knowledge on a family of dynamical systems, specifically the functional variation of the derivative (force) field.
It constructs a parametrized functional form of the derivative field with a shared NN across system instances and latent adaptation parameters adapted for different instances. The NN and the adaptation parameters are learned from the difference between the ground truth and the solution calculated by an appropriate ODE solver. We have validated the generalizability, interpretability, and fast-adaptation ability of the iMODE method with various examples. iMODE opens a potential avenue for modeling and real-time control problems where the underlying systems are rapidly changing.

∗ V.R.: vwani@ee.ucla.edu; M.K.J.: khalidjm@seas.ucla.edu
[1] J. Sprakel, S. B. Lindström, T. E. Kodger, and D. A. Weitz, Stress enhancement in the delayed yielding of colloidal gels, Physical Review Letters 106, 248303 (2011).
[2] M. K. Jawed, P. Dieleman, B. Audoly, and P. M. Reis, Untangling the mechanics and topology in the frictional response of long overhand elastic knots, Physical Review Letters 115, 118302 (2015).
[3] R. Alert, A. Martínez-Calvo, and S. S. Datta, Cellular sensing governs the stability of chemotactic fronts, Physical Review Letters 128, 148101 (2022).
[4] G. E. Karniadakis, I. G. Kevrekidis, L. Lu, P. Perdikaris, S. Wang, and L. Yang, Physics-informed machine learning, Nature Reviews Physics, 1–19 (2021).
[5] M. Raissi, Deep hidden physics models: Deep learning of nonlinear partial differential equations, Journal of Machine Learning Research 19, 1 (2018).
[6] R. T. Q. Chen, Y. Rubanova, J. Bettencourt, and D. Duvenaud, Neural ordinary differential equations, Advances in Neural Information Processing Systems (2018).
[7] S. L. Brunton, J. L. Proctor, and J. N. Kutz, Discovering governing equations from data by sparse identification of nonlinear dynamical systems, Proceedings of the National Academy of Sciences 113, 3932 (2016).
[8] K. Champion, B. Lusch, J. N. Kutz, and S. L. Brunton, Data-driven discovery of coordinates and governing equations, Proceedings of the National Academy of Sciences 116, 22445 (2019).
[9] B. Chen, K. Huang, S. Raghupathi, I. Chandratreya, Q. Du, and H. Lipson, Automated discovery of fundamental variables hidden in experimental data, Nature Computational Science 2, 433 (2022).
[10] R. Iten, T. Metger, H. Wilming, L. d. Rio, and R. Renner, Discovering physical concepts with neural networks, Physical Review Letters 124, 010508 (2020).
[11] Z. Liu and M. Tegmark, Machine learning conservation laws from trajectories, Physical Review Letters 126, 180604 (2021).
[12] Z. Liu and M. Tegmark, Machine learning hidden symmetries, Physical Review Letters 128, 180201 (2022).
[13] K. Lee and E. J. Parish, Parameterized neural ordinary differential equations: Applications to computational physics problems, Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences 477, 20210162 (2021).
[14] S. Desai, M. Mattheakis, H. Joy, P. Protopapas, and S. J. Roberts, One-shot transfer learning of physics-informed neural networks, in ICML 2022 2nd AI for Science Workshop (2022).
[15] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, and A. Anandkumar, Neural operator: Graph kernel network for partial differential equations (2020), arXiv:2003.03485 [cs.LG].
[16] Z. Li, N. B. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, and A. Anandkumar, Fourier neural operator for parametric partial differential equations, in International Conference on Learning Representations (2020).
[17] L. Lu, P. Jin, G. Pang, Z. Zhang, and G. E. Karniadakis, Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators, Nature Machine Intelligence 3, 218–229 (2021).
[18] S. Wang, H. Wang, and P. Perdikaris, Learning the solution operator of parametric partial differential equations with physics-informed DeepONets, Science Advances 7, eabi8605 (2021).
[19] H. Wang, H. Zhao, and B. Li, Bridging multi-task learning and meta-learning: Towards efficient training and effective adaptation, in Proceedings of the 38th International Conference on Machine Learning (PMLR, 2021) pp. 10991–11002.
[20] C. Finn, P. Abbeel, and S. Levine, Model-agnostic meta-learning for fast adaptation of deep networks, in International Conference on Machine Learning (PMLR, 2017) pp. 1126–1135.
[21] A. Nichol, J. Achiam, and J. Schulman, On first-order meta-learning algorithms, CoRR abs/1803.02999 (2018), arXiv:1803.02999.
[22] C. Finn, A. Rajeswaran, S. Kakade, and S. Levine, Online meta-learning, in Proceedings of the 36th International Conference on Machine Learning, Proceedings of Machine Learning Research, Vol. 97, edited by K. Chaudhuri and R. Salakhutdinov (PMLR, 2019) pp. 1920–1930.
[23] A. Rajeswaran, C. Finn, S. M. Kakade, and S. Levine, Meta-learning with implicit gradients, in Advances in Neural Information Processing Systems, Vol. 32 (Curran Associates, Inc., 2019).
[24] A. Raghu, M. Raghu, S. Bengio, and O. Vinyals, Rapid learning or feature reuse? Towards understanding the effectiveness of MAML, in International Conference on Learning Representations (2019).
[25] R. T. Q. Chen, B. Amos, and M. Nickel, Learning neural event functions for ordinary differential equations, International Conference on Learning Representations (2021).
[26] Q. Li, T. Wang, V. Roychowdhury, and M. Jawed, Rapidly encoding generalizable dynamics in a Euclidean symmetric neural network, Extreme Mechanics Letters, 101925 (2022).
[27] See Supplemental Material for details on iMODE training, the wall bouncing system, the double pendulum system, dimension determination of the latent space, demonstrations on complex systems, and movies of the diffeomorphism.
[28] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, N. Gimelshein, L. Antiga, A. Desmaison, A. Kopf, E. Yang, Z. DeVito, M. Raison, A. Tejani, S. Chilamkurthy, B. Steiner, L. Fang, J. Bai, and S. Chintala, PyTorch: An imperative style, high-performance deep learning library, in Advances in Neural Information Processing Systems 32, edited by H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett (Curran Associates, Inc., 2019) pp. 8024–8035.
Figure 3. (color online). (a) Comparison of iMODE test adaptation vs. training from scratch on 50 unseen bistable system instances with randomly chosen physical parameters. iMODE demonstrates fast adaptation and good generalization within the first 5 adaptation steps. (b) The true and learned potential energy functions for the wall bouncing system.
The width of the potential well increases as the adaptation parameter increases. (c) The number of top PCA components that preserve a significant portion (> 99%) of the variance gives a good estimate of the dimension of the true physical parameters. (d) The diffeomorphism constructed by the NODE for the bistable system. It shows how a grid in the physical space is continuously deformed into the latent space of adaptation parameters. (e) The mean error and computation time of the Neural Gauge for 100 systems with randomly generated unseen parameters.

Supplemental Information

Testing performance of the iMODE method

Figure S1. (color online). The testing performance of (a) the pendulum system, (b) the bistable system, (c) the Van der Pol system. The solid lines are ground truth. In (a) and (b) the circles, and in (c) the dashed lines, are predictions of the corresponding iMODE models. Different colors represent different parametric systems.

Oscillating pendulum

ml²θ̈ + mgl sin(θ) = 0,  s.t.  θ(0) = θ₀, θ̇(0) = θ̇₀    (7)

The training dataset contains 5 system instances with l = [1, 3, 5, 7, 9] m. A long trajectory of 10 s is generated for each instance with initial position and velocity π/2 and 0 s⁻¹ and a time-marching stepsize of 10 ms.
During training, a batch of 20 trajectories of 1 s is pulled out randomly in each epoch, so the iMODE training essentially sees 1 s trajectories with different initial conditions. The learnt iMODE model is tested on 8 unseen system instances with l = [2, 3.5, 4, 5.1, 6, 6.9, 8, 10] m. The task adaptation is done similarly to the training, i.e., seeing a batch of 20 randomly pulled-out trajectories of 1 s for each testing system instance. The task adaptation takes only 5 steps. The learnt model for each instance is then used to calculate a trajectory of 5 s from a given initial condition and compared with the ground truth. The results are shown in Fig. S1(a). The solid lines (ground truth) match well with the circles (prediction).
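The data generation for this pendulum study can be reproduced with a few lines of SciPy. The sketch below follows the values quoted above (l = 1, 3, 5, 7, 9 m, θ(0) = π/2, θ̇(0) = 0, a 10 s span with 10 ms steps), while g = 9.81 m/s² and the solver tolerances are assumptions.

import numpy as np
from scipy.integrate import solve_ivp

g = 9.81
t_eval = np.arange(0.0, 10.0 + 1e-9, 0.01)            # 10 s trajectory, 10 ms steps

def pendulum(t, y, l):
    theta, omega = y
    return [omega, -(g / l) * np.sin(theta)]           # ml^2*theta'' + mgl*sin(theta) = 0

trajectories = {
    l: solve_ivp(pendulum, (0.0, 10.0), [np.pi / 2, 0.0],
                 t_eval=t_eval, args=(l,), rtol=1e-8, atol=1e-8).y
    for l in [1, 3, 5, 7, 9]
}
# Random 1 s windows (100 consecutive samples) can then be drawn from each trajectory
# to form the training batches described above.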
Figure S2. (color online). The three variation directions in the latent space for the physical parameters of the Van der Pol system, ϵ, δ, and ω.

Bistable oscillator

ẍ + k1x + k3x³ = 0,  s.t.  x(0) = x₀, ẋ(0) = ẋ₀    (8)

The training dataset contains 20 system instances, a mesh of k1 = [−0.4, −0.6, −0.8, −1.0] and k3 = [2.0, 2.9, 3.7, 4.6, 5.0]. Trajectories from multiple initial conditions with a stepsize of 10 ms and a time span of 10 s are generated for each instance. During training, a batch of 100 randomly pulled-out trajectories of 1 s is used for each epoch. During testing, task adaptation takes 5 steps on the previously unseen systems [k1, k3] = [−0.5, 3.1], [−0.7, 4.2], and [−0.5, 4.7].
Van der Pol system

\ddot{x} - \epsilon \dot{x}(1 - \delta x^2) + \omega^2 x = 0 \quad \text{s.t.} \quad x(0) = x_0, \ \dot{x}(0) = \dot{x}_0 \qquad (9)

The training dataset contains 27 system instances, a mesh of ϵ = [1.0, 2.0, 3.0], δ = [1.0, 2.0, 3.0], and ω = [0.5, 1.0, 1.5]. Trajectories of multiple initial conditions with stepsize 10 ms and time span 10 s are generated for each instance. During training, a batch of 100 randomly pulled-out trajectories of 1 s is used for each epoch. During testing, task adaptation takes 5 steps on previously unseen system instances [ϵ, δ, ω] = [1.2, 1.2, 2.1], [1.2, 1.8, 1.4], [2.6, 1.5, 2.5].
The learnt models calculate trajectories of 5 s given an initial condition. The results are shown in Fig. S1(c).
The three variation directions in the latent space η ∈ R3 for ϵ, δ, and ω are shown in Fig. S2.

Other systems

Figure S3. (color online). (a) The wall bouncing system. (b) The double pendulum system.
Wall bouncing system

The governing equation for the wall bouncing system (Fig. S3(a)) is

\ddot{x} + F(x) = 0 \quad \text{s.t.} \quad x(0) = x_0, \ \dot{x}(0) = \dot{x}_0, \qquad F(x) = \begin{cases} -k(x - w), & x \geq w \\ 0, & |x| < w \\ -k(x + w), & x \leq -w \end{cases} \qquad (10)

where x and v are the particle position and velocity, k = 1000 N/m is a large constant used to approximate a stiff wall, and w is the (half-)width of the potential well, as shown in Fig. S3(a). The system has a potential energy well of the form

E(x) = \begin{cases} 0, & |x| < w \\ \infty, & |x| \geq w \end{cases} \qquad (11)

The training dataset contains 10 system instances, with the width increasing from 0.1 m to 1.0 m in steps of 0.1 m. Trajectories of multiple initial conditions (initial position 0 m, initial velocities ranging from 0.1 m/s to 1.0 m/s) with stepsize 10 ms and time span 10 s are generated for each instance. During training, a batch of 100 randomly pulled-out trajectories of 1 s is used for each epoch. In this case the intermediate output of the NN is the energy, and the force is derived by taking the derivative of the output with respect to the input, i.e.
F = \frac{\partial E}{\partial x} = \frac{\partial \big( \mathrm{NN}_\theta(x; \eta) + \mathrm{NN}_\theta(-x; \eta) \big)}{\partial x} \qquad (12)

The second equality takes advantage of the assumption that the energy is symmetric with respect to x. The learning results show η ∈ R to be in perfect correlation with the width w of the potential well (Fig. S4). In other words, we can control the width of the potential well constructed by the NN, which is another way to interpret the physical meaning of the adaptation parameter η.

Figure S4. (color online). The true widths of the wall bouncing system and the learnt adaptation parameters η are in perfect correlation (99.87%).

Double pendulum

The double pendulum, as shown in Fig. S3(b), has two masses m1 = m2 = 1 kg and arm lengths L1 and L2. The governing equations are

\dot{\theta}_1 = \omega_1, \qquad \dot{\theta}_2 = \omega_2,

\dot{\omega}_1 = \frac{-g(2m_1 + m_2)\sin\theta_1 - m_2 g \sin(\theta_1 - 2\theta_2) - 2\sin(\theta_1 - \theta_2)\, m_2 \big( \omega_2^2 L_2 + \omega_1^2 L_1 \cos(\theta_1 - \theta_2) \big)}{L_1 \big( 2m_1 + m_2 - m_2 \cos(2\theta_1 - 2\theta_2) \big)},

\dot{\omega}_2 = \frac{2\sin(\theta_1 - \theta_2) \big( \omega_1^2 L_1 (m_1 + m_2) + g(m_1 + m_2)\cos\theta_1 + \omega_2^2 L_2 m_2 \cos(\theta_1 - \theta_2) \big)}{L_2 \big( 2m_1 + m_2 - m_2 \cos(2\theta_1 - 2\theta_2) \big)} \qquad (13)
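For reference, eq. (13) transcribes directly into a right-hand-side function suitable for a standard ODE integrator. This is only a sketch for generating ground-truth trajectories; the value g = 9.81 m/s^2 is an assumption not stated in the text.

import numpy as np

def double_pendulum_rhs(t, y, L1, L2, m1=1.0, m2=1.0, g=9.81):
    """State y = [theta1, theta2, omega1, omega2]; returns its time derivative per eq. (13)."""
    th1, th2, w1, w2 = y
    delta = th1 - th2
    den = 2 * m1 + m2 - m2 * np.cos(2 * th1 - 2 * th2)
    dw1 = (-g * (2 * m1 + m2) * np.sin(th1)
           - m2 * g * np.sin(th1 - 2 * th2)
           - 2 * np.sin(delta) * m2 * (w2 ** 2 * L2 + w1 ** 2 * L1 * np.cos(delta))) / (L1 * den)
    dw2 = (2 * np.sin(delta) * (w1 ** 2 * L1 * (m1 + m2)
           + g * (m1 + m2) * np.cos(th1)
           + w2 ** 2 * L2 * m2 * np.cos(delta))) / (L2 * den)
    return [w1, w2, dw1, dw2]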
Figure S5. (color online). The learnt latent space of adaptation parameters for the double pendulum system. (a) Each marked line shows systems with the same L1. (b) Each marked line shows systems with the same L2.
There are clearly two directions in the latent space (indicated by the arrows) corresponding to the change of the physical parameters L1 and L2.

The training dataset contains 16 system instances, a mesh of L1 = [0.5, 0.6, 0.7, 0.8] m and L2 = [0.5, 0.6, 0.7, 0.8] m. Trajectories with initial positions [π/4, π/4] and initial velocities [0, 0], with stepsize 10 ms and time span 10 s, are generated for each system. During training, a batch of 100 randomly pulled-out trajectories of 1 s is used for each epoch. Task adaptation takes 5 steps. The learnt latent space of adaptation parameters is shown in Fig. S5. Two directions clearly exist, corresponding to the variation of the physical parameters L1 and L2, which again underlines the interpretability of η ∈ R2.

Comparison between iMODE and training from scratch

We compare the performance of iMODE adaptation with the "training from scratch" (TFS) approach. The iMODE adaptation starts from a weight initialization trained on a training dataset.
It updates the adaptation parameter η ∈ R2 on a testing dataset, which is not included in the training dataset. The TFS approach uses the same NN architecture and hyperparameters as iMODE, but the TFS NN is randomly initialized and all of its weights are updated on the same testing dataset. After training the TFS NN and adapting the iMODE η on the same testing dataset, the two NNs are evaluated on an unseen evaluation dataset. As shown in Fig. 3(a), iMODE significantly outperforms the TFS approach in terms of adaptation speed (vs. training speed in the TFS approach) and evaluation accuracy.

Figure S6. (color online). (a) The training/adaptation and evaluation performance of the TFS and iMODE NNs, given a single trajectory of the bistable system with physical parameters k1 = −1.0 and k3 = 2.0, and initial condition x0 = 0.7 m and ẋ0 = 0 m/s.
(b) iMODE learns the correct double-well potential energy function, while the TFS approach learns nothing due to data scarcity.

This means that the iMODE approach can learn the dynamics of an unseen system more rapidly and predict future events more accurately than a TFS NN. The observation is pronounced in the following case: we feed these two NNs a single trajectory of the bistable system with physical parameters k1 = −1.0 and k3 = 2.0, and initial condition x0 = 0.7 m and ẋ0 = 0 m/s. After training/adaptation, we evaluate the TFS and iMODE NNs on trajectories of the same system but with different initial conditions. The training/adaptation and evaluation curves are shown in Fig. S6(a); iMODE outperforms the TFS approach in both training/adaptation and evaluation accuracy. The learnt energy functions of both approaches are compared in Fig. S6(b). The energy function of the TFS NN is totally incorrect due to the data scarcity: under this specific initial condition, the bistable system only oscillates intra-well, so the information contained in the trajectory is insufficient to depict the entire potential energy surface.
Meanwhile, the iMODE NN learns an accurate double-potential-well function from the same data, because appropriate prior knowledge about the energy functions of bistable systems (i.e. double-well) is already embedded in its weight initialization.

Dimension determination of physical parameters with PCA

The workflow of using PCA to determine the optimal dimension d of the adaptation parameters η for a parametric system is: (1) make a rough guess d̃ of the dimension, then run the iMODE algorithm on the trajectories of Ns systems; (2) form a matrix with the results, M = [η1, η2, . . . , ηNs]; (3) perform PCA on M; a significant portion of the variance (e.g. 99%) will be preserved in the first d̂ dimensions, which gives an estimate of d; (4) repeat the process with different initial guesses d̃. The estimated optimal dimension is more credible when different d̃ result in the same d̂.
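A minimal sketch of this dimension-determination step is given below, assuming the adapted vectors ηi have already been collected into an array; the 99% threshold follows the text, while the function and variable names are illustrative.

import numpy as np

def estimate_dimension(etas, threshold=0.99):
    """etas: array of shape (Ns, d_tilde), one adapted eta per system instance."""
    centered = etas - etas.mean(axis=0, keepdims=True)
    # singular values of the centered matrix give the principal variances
    s = np.linalg.svd(centered, compute_uv=False)
    variance_ratio = np.cumsum(s ** 2) / np.sum(s ** 2)
    d_hat = int(np.searchsorted(variance_ratio, threshold) + 1)   # first order reaching the threshold
    return d_hat, variance_ratio

# example: stack the Ns adapted vectors obtained with a rough guess d_tilde
# d_hat, ratios = estimate_dimension(np.stack(eta_list))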
[Figure S7 panels: Pendulum, Wall bouncing, Bistable, Double pendulum, Van der Pol; preserved variance ratio vs. number of retained principal components (Order).]

Figure S7. (color online). PCA can be used to determine the optimal dimensions of the adaptation parameters η in the studied systems. These optimal dimensions prove to equal the true dimension dφ of the physical parameters φ of the systems. The red dashed line indicates 0.99.

The PCA determination results of all systems are shown in Fig. S7.
The red dashed lines mark the 99% variance preservation. Dotted lines in each case mean that the initial guess d̃ is the dimension of the real physical parameters dφ plus 2. For example, in the Van der Pol system the dotted line corresponds to the initial guess d̃ = 5. After the PCA, if we preserve 4 or 3 principal components, more than 99% of the variance is still preserved; if we further reduce the number of preserved principal components to 2 or 1, we see a sudden drop below the 99% threshold, which indicates the optimal dimension to be 3. The d̃ for the dash-dotted and solid lines are the dimension of the real physical parameters dφ plus 1 and 0, respectively. With different d̃, we can repeatedly confirm the optimal dimension of η to be 3 in the case of the Van der Pol system (which is the true dimension of its physical parameters). For the other systems, the workflow is the same.

Neural Gauge diffeomorphism

As suggested by the PCA analysis in Section S, the adaptation parameters {ηi}, i = 1, . . . , Ns, adapted to the system instances of a family of dynamical systems occupy a dφ-dimensional manifold, even if the latent space they reside in is dη-dimensional with dη ≥ dφ. Therefore, a diffeomorphism can be established mapping η in the latent space to the corresponding physical parameters {φi}, i = 1, . . . , Ns, even if their dimensions do not match, assuming dη ≥ dφ. Practically, the neural ODE modelling such a diffeomorphism can be defined as dz(t)/dt = gξ(z), such that for i = 1, . . . , Ns, starting from a given point in the latent space, z(0) = ηi, the state at t = 1 is z(1) = [φiT, 0, . . . , 0]T, i.e., the concatenation of the corresponding physical parameters and dη − dφ padding zeros.
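The sketch below illustrates one possible realization of such a gauge map, not the authors' implementation: a small network gξ integrated from t = 0 to t = 1 with a fixed-step RK4 scheme, trained so that z(1) matches the padded physical parameters. The hidden width, substep count, and activation are assumptions.

import torch
import torch.nn as nn

class GaugeODE(nn.Module):
    """dz/dt = g_xi(z); integrating from z(0) = eta to t = 1 should give [phi, 0, ..., 0]."""
    def __init__(self, dim):
        super().__init__()
        self.g = nn.Sequential(nn.Linear(dim, 64), nn.Softplus(), nn.Linear(64, dim))

    def forward(self, eta, n_substeps=20):
        z, h = eta, 1.0 / n_substeps
        for _ in range(n_substeps):          # simple fixed-step RK4 integration from t=0 to t=1
            k1 = self.g(z)
            k2 = self.g(z + 0.5 * h * k1)
            k3 = self.g(z + 0.5 * h * k2)
            k4 = self.g(z + h * k3)
            z = z + h / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
        return z

# training target for instance i: torch.cat([phi_i, torch.zeros(d_eta - d_phi)])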
Complex cases

Slinky: the Euclidean symmetric neural network

Figure S8. (color online). The Euclidean invariance of the energy and the induced equivariance of the force of the NN used in the Slinky system case. All the Euclidean-transformed configurations have the same energy as the original configuration. The elastic forces on the middle bars are Euclidean-transformed accordingly.

The NN used in the Slinky case follows the Euclidean symmetric neural network (ESNN) architecture [1]. The Slinky is decomposed into 40 consecutive triplets, i.e., the 2D representation of 3 adjacent cycles. We denote the coordinates of the ith triplet as ξi = [x_{i−1}^T, x_i^T, x_{i+1}^T]^T ∈ R9, where xi ∈ R3 contains the coordinates of the ith bar: the x and y coordinates of the bar center and the inclination angle of the bar. The potential energy associated with the middle bar of a triplet is only a function of the coordinates of the 3 bars of the same triplet (and of the adaptation parameters), i.e.
E_i = E_i(\xi_i; \eta) \qquad (14)

In the following we omit the subscript i for brevity. The force F induced by E is

F = \frac{\partial E}{\partial x_i} = \frac{\partial E(z; \eta)}{\partial x_i} = \frac{\partial E(\xi; \eta)}{\partial x_i} \qquad (15)

where z ∈ R6 denotes the relative coordinates between the bars of the ith triplet. We enforce Euclidean invariance, i.e., translational, rotational, and chiral invariance, of E with respect to ξ by giving E(z) the following form, i.e., the ESNN

E(z; \eta) = \mathrm{NN}_\theta(z; \eta) + \mathrm{NN}_\theta(R_x(z); \eta) + \mathrm{NN}_\theta(R_y(z); \eta) + \mathrm{NN}_\theta(R_x(R_y(z)); \eta) \qquad (16)

where R_x(\cdot) and R_y(\cdot) denote reflection with respect to the x and y axes. Note that

R_x(R_x(\cdot)) = I(\cdot), \quad R_y(R_y(\cdot)) = I(\cdot), \quad R_x(R_y(\cdot)) = R_y(R_x(\cdot)) \qquad (17)

where I(\cdot) is the identity operation. It is then easy to prove the chiral invariance of E, i.e.,

E(z; \eta) = E(R_x(z); \eta) = E(R_y(z); \eta) = E(R_x(R_y(z)); \eta) \qquad (18)
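The symmetrization in eq. (16) and the force of eq. (15) can be sketched as follows. This is not the ESNN code of [1]; in particular, the sign patterns used to represent the x- and y-reflections are an assumption about how the relative coordinates are laid out, and the hidden widths are illustrative.

import torch
import torch.nn as nn

class SymmetrizedEnergy(nn.Module):
    """Sketch of the symmetrized triplet energy of eq. (16).

    z: (batch, 6) relative coordinates of a triplet; eta: (1, d_eta) adaptation parameter.
    """
    def __init__(self, dim_z=6, dim_eta=1):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim_z + dim_eta, 64), nn.Softplus(),
            nn.Linear(64, 64), nn.Softplus(),
            nn.Linear(64, 1))
        # assumed sign patterns of the reflections acting on the relative coordinates
        self.register_buffer("rx", torch.tensor([ 1., -1., -1.,  1., -1., -1.]))
        self.register_buffer("ry", torch.tensor([-1.,  1., -1., -1.,  1., -1.]))

    def energy(self, z, eta):
        def e(zz):  # plain network energy for one reflected copy
            return self.net(torch.cat([zz, eta.expand(zz.shape[0], -1)], dim=-1))
        # eq. (16): sum over the identity and the three reflections
        return e(z) + e(self.rx * z) + e(self.ry * z) + e(self.rx * self.ry * z)

    def force(self, z, eta):
        z = z.detach().requires_grad_(True)
        total = self.energy(z, eta).sum()
        # eq. (15): the force follows from differentiating the energy w.r.t. the coordinates
        return torch.autograd.grad(total, z, create_graph=True)[0]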
Then, from Eq. (15), F is equivariant to rigid body and chiral transformations of ξ, as shown in Fig. S8, including translation, rotation, and reflection, regardless of η. The ESNN is applied to each triplet in the Slinky to calculate the elastic force acting on each bar. The assembled force vector is used to update the system state inside the NODE framework, and the difference between the true and predicted trajectories is used to update the ESNN weights θ. After training and performing trajectory predictions in 2D, a geometric method can be used to reconstruct the 3D Slinky configurations [1]; see [1] for more implementation details.

The training dataset contains 4 Slinkies of different Young's modulus (50, 60, 70, and 80 GPa). The Slinkies are clamped at both ends and drop freely under gravity from a horizontal initial configuration. Two inner steps are taken to update η ∈ R for each Slinky. Note that whenever η is updated, the NN still preserves energy invariance and force equivariance with respect to the coordinates of the Slinky.
Figure S9. (color online). (a) The testing performance of the iMODE model on an unseen Slinky (of an unseen Young's modulus) with the same boundary condition as in the training dataset. Top and bottom rows are the ground truth and the iMODE model prediction at 0.28, 0.47, 0.65, 0.83 s (left to right). (b) The testing performance of the iMODE model on unseen initial and boundary conditions. Top and bottom rows are the ground truth and the iMODE model prediction at 0.15, 0.32, 0.48, 0.65 s (left to right).

After training the NN, we perform task adaptation (2 steps) on an unseen Slinky of Young's modulus 56 GPa and observe a good fit (Fig. S9(a)). The resulting NN is then directly applied, without any modification, to computation under an unseen boundary condition and Slinky orientation: the bottom end is free and the Slinky drops under gravity from a vertical initial configuration (Fig. S9(b)). We can achieve this because the model-agnostic nature of the iMODE method allows us to embed the Euclidean symmetries into the NN.
Kolmogorov-Petrovsky-Piskunov (KPP) equation

To solve the KPP equation, we discretize the spatial domain [0, 1] into 20 segments, so the partial differential equation (here x denotes the spatial coordinate)

\frac{\partial u}{\partial t} = D \frac{\partial^2 u}{\partial x^2} + r u (1 - u) \qquad (19)

is represented by an ordinary differential equation system containing 21 variables. The diffusion term is approximated by a 2nd-order central difference and the diffusivity is assumed known. The meta-learning is performed to learn the reaction term with different reaction strength coefficients r (without knowing its mathematical form). The training dataset contains 5 systems with r = 0.01, 0.02, 0.03, 0.04, 0.05. The Neumann boundary condition u′(0) = u′(1) = 0 (′ denotes the derivative with respect to x) is used across the training dataset. The iMODE task adaptation takes 5 iterations. The training results for r = 0.01 and 0.05 are shown in Fig. S10(a). The iMODE NN is then adapted on data from an unseen system instance with r = 0.034.
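The method-of-lines discretization described above could be written as the following right-hand-side function, a sketch rather than the authors' code; the diffusivity value and the example initial condition are assumptions.

import numpy as np

def kpp_rhs(t, u, D, r, dx=0.05):
    """RHS of eq. (19) on [0, 1] with 20 segments (21 nodes).

    Neumann conditions u'(0) = u'(1) = 0 are imposed by mirroring ghost nodes.
    """
    u_ext = np.concatenate(([u[1]], u, [u[-2]]))                       # mirror ghost nodes
    lap = (u_ext[2:] - 2 * u_ext[1:-1] + u_ext[:-2]) / dx ** 2         # 2nd-order central difference
    return D * lap + r * u * (1 - u)                                   # diffusion + reaction

# example: 21 grid points and a smooth bump initial condition
x = np.linspace(0.0, 1.0, 21)
u0 = np.exp(-((x - 0.5) / 0.1) ** 2)
dudt = kpp_rhs(0.0, u0, D=1e-3, r=0.03)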
The resulting NN is directly applied to computation with unseen initial and boundary conditions (Dirichlet type u(0) = u(1) = 1). The results of the latter are shown in Fig. S10(b) and a good agreement is observed. This again validates the capability of the iMODE algorithm to adapt quickly to unseen complex parametric systems and to predict accurately under initial and boundary conditions different from those in the training dataset. Another testing result for the KPP system is shown in Fig. S10(c). The testing has the same type of boundary condition (u′(0) = u′(1) = 0) as the training dataset but an unseen initial condition. The prediction (right) matches the ground truth (left) well.
Neural network architecture
Throughout this letter, we use a DenseNet-like architecture [2] for our neural networks (NNs), where shortcut pathways are created for a layer from all its previous layers. It takes in the input and first increases the feature dimension to 32 by a fully-connected (FC) layer. Then the feature is passed through FC layers with Softplus activation. A new feature with an increased dimension is formed by concatenating the previous feature with the FC layer output, i.e.,
f_i = [FC(f_{i−1}); f_{i−1}],  FC(f_{i−1}) ∈ R^32,  i = 1, 2, ..., 5,
f_0 = FC(Input) ∈ R^32,
Output = FC(f_5),   (20)
where f_i is the feature map of the i-th layer. After passing through 5 densely connected layers, the feature dimension is increased to 192. This feature is then passed through an FC layer with no activation to produce the final output. For the pendulum, bistable, wall-bouncing, and Slinky systems, the NN input is the vector concatenating the system position x and the adaptation parameter η, i.e., [x^T, η^T]^T. The output is a scalar, i.e., the energy of the system. The force vector is calculated by back-propagating the NN output with respect to x. For the Van der Pol system, the input is the vector concatenating the system state y and η, i.e., [y^T, η^T]^T. The output is the force vector. For the KPP system, the input is [u, η]. The output is the reaction forcing term.
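To make the densely connected architecture of Eq. (20) concrete, here is a minimal NumPy sketch of the forward pass (ours, not the authors' code): the input is lifted to 32 features, each of the 5 FC layers with Softplus activation appends 32 new features to the running concatenation (reaching 192), and a final linear layer produces the output. The layer sizes follow the text; the weight initialization and the example input/output dimensions are illustrative assumptions.

import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def init_fc(n_in, n_out, rng):
    # One fully-connected layer as a (weight, bias) pair; initialization is illustrative.
    return rng.normal(0.0, 1.0 / np.sqrt(n_in), (n_out, n_in)), np.zeros(n_out)

def densenet_forward(x, first, hidden, last):
    W0, b0 = first
    f = W0 @ x + b0                      # f_0 = FC(Input) in R^32
    for W, b in hidden:                  # 5 densely connected layers
        new = softplus(W @ f + b)        # FC(f_{i-1}) in R^32 with Softplus activation
        f = np.concatenate([new, f])     # f_i = [FC(f_{i-1}); f_{i-1}]
    Wo, bo = last
    return Wo @ f + bo                   # final FC layer, no activation

rng = np.random.default_rng(0)
n_in, n_out = 3, 1                       # e.g. [x^T, eta^T]^T -> scalar energy (dims are assumptions)
first = init_fc(n_in, 32, rng)
hidden = [init_fc(32 * (i + 1), 32, rng) for i in range(5)]
last = init_fc(32 * 6, n_out, rng)       # feature dimension reaches 192 = 32 * 6
print(densenet_forward(rng.normal(size=n_in), first, hidden, last))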
Supplementary movie
Movie S1. The diffeomorphism for the bistable system. The data points are transformed from the physical space (subtracted mean) to the latent space of adaptation parameters (subtracted mean). The right subplot is the enlarged view of the left plot.
Movie S2. The diffeomorphism for the Van der Pol system. The data points are transformed from the physical space (subtracted mean) to the latent space of adaptation parameters (subtracted mean). The right subplot is the enlarged view of the left plot.
∗ V.R.: vwani@ee.ucla.edu, M.K.J.: khalidjm@seas.ucla.edu
[1] Q. Li, T. Wang, V. Roychowdhury, M. Jawed, Rapidly encoding generalizable dynamics in a Euclidean symmetric neural network, Extreme Mechanics Letters (2022) 101925.
[2] G. Huang, Z. Liu, L. Van Der Maaten, K. Q. Weinberger, Densely connected convolutional networks, in: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 2261–2269.
Figure S10. (color online). (a) The training results of the iMODE algorithm on the KPP system for r = 0.01 (left) and r = 0.05 (right). Solid lines are ground truth. Dashed lines are iMODE predictions. The arrow indicates time marching of u. (b) The iMODE testing results on an unseen system r = 0.034 with an unseen boundary condition. The ground truth (left) matches the iMODE prediction (right) well. (c) The iMODE testing results (r = 0.034) on an unseen initial condition from the training dataset. The ground truth (left) matches the iMODE prediction (right) well. [Panel axes: x (m) and u; panel labels (a) t, (b), (c).]
Figure S11. (color online). The DenseNet-like structure.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} +page_content=' 28' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/wtAzT4oBgHgl3EQfCPrg/content/2301.00957v1.pdf'} diff --git a/xdE0T4oBgHgl3EQf-gIr/vector_store/index.faiss b/xdE0T4oBgHgl3EQf-gIr/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..21adef280db06b7e6476539189193083c0a147f8 --- /dev/null +++ b/xdE0T4oBgHgl3EQf-gIr/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a99ba62cb7f58fbf90b6cfd876f7e0a58ecf795d597b2798c08723884691321 +size 5898285 diff --git a/xdE0T4oBgHgl3EQf-gIr/vector_store/index.pkl b/xdE0T4oBgHgl3EQf-gIr/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..ec670efb68af0deaadb23a6fc1a8eeeb5b59ed68 --- /dev/null +++ b/xdE0T4oBgHgl3EQf-gIr/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2098f93392d2e6248727b5f3a715b3b78dd04edc3f82cd7a5cda14f344c58937 +size 220757 diff --git a/ydE2T4oBgHgl3EQf3wjf/content/2301.04175v1.pdf b/ydE2T4oBgHgl3EQf3wjf/content/2301.04175v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aa604f38022484796013a17a63164d90c27be67e --- /dev/null +++ b/ydE2T4oBgHgl3EQf3wjf/content/2301.04175v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac864719f6a616d17359e2b1c8f9269383324425309f83cadf74198ae7caa9fc +size 8031463 diff --git a/zdAyT4oBgHgl3EQfbPea/content/2301.00259v1.pdf b/zdAyT4oBgHgl3EQfbPea/content/2301.00259v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0ec321e1aeaed68a9ed7b5d507641722a2a20814 --- /dev/null +++ b/zdAyT4oBgHgl3EQfbPea/content/2301.00259v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b83dc305c52ffb8a1727d8b9ca4f65ba3c60e62ca04cc657001500a7438ecd22 +size 4213775 diff --git a/zdAyT4oBgHgl3EQfbPea/vector_store/index.faiss b/zdAyT4oBgHgl3EQfbPea/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..3d456df9db50c3bf7041a2e944f0976b0ed6c4ff --- /dev/null +++ b/zdAyT4oBgHgl3EQfbPea/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59cfb37a9de48429aaef43f88dee9caf09de9114e24116f5b4e859ce59a35888 +size 6160429 diff --git a/zdAyT4oBgHgl3EQfbPea/vector_store/index.pkl b/zdAyT4oBgHgl3EQfbPea/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6a68184136a2984395e5f36272a81e04bae2f9af --- /dev/null +++ b/zdAyT4oBgHgl3EQfbPea/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bcd541bbe904b34c07576460ff6559ecb050760f517b9f34c70a55aeffc828b +size 226485 diff --git a/ztFJT4oBgHgl3EQfjCwq/content/tmp_files/2301.11572v1.pdf.txt b/ztFJT4oBgHgl3EQfjCwq/content/tmp_files/2301.11572v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..3f45bfb26ca3ed3c59933286cba99156950138b1 --- /dev/null +++ b/ztFJT4oBgHgl3EQfjCwq/content/tmp_files/2301.11572v1.pdf.txt @@ -0,0 +1,1764 @@ +BLANK +1 +Noncontact Haptic Rendering of Static Contact with +Convex Surface Using Circular Movement of +Ultrasound Focus on a Finger Pad +Tao Morisaki, Masahiro Fujiwara, Member, IEEE Yasutoshi Makino, and Hiroyuki Shinoda, Member, IEEE +Abstract—A noncontact tactile stimulus can be presented by +focusing airborne ultrasound on the human skin. 
Focused ultra- +sound has recently been reported to produce not only vibration +but also static pressure sensation on the palm by modulating +the sound pressure distribution at a low frequency. This finding +expands the potential for tactile rendering in ultrasound haptics +as static pressure sensation is perceived with a high spatial +resolution. In this study, we verified that focused ultrasound can +render a static pressure sensation associated with contact with a +small convex surface on a finger pad. This static contact rendering +enables noncontact tactile reproduction of a fine uneven surface +using ultrasound. In the experiments, four ultrasound foci were +simultaneously and circularly rotated on a finger pad at 5 Hz. +When the orbit radius was 3 mm, vibration and focal movements +were barely perceptible, and the stimulus was perceived as static +pressure. Moreover, under the condition, the pressure sensation +rendered a contact with a small convex surface with a radius +of 2 mm. The perceived intensity of the static contact sensation +was equivalent to a physical contact force of 0.24 N on average, +which was 12 times the radiation force physically applied to the +skin. +Index Terms—Static contact sensation, convex surface, midair +haptics, focused ultrasound. +I. INTRODUCTION +A +IRBORNE ultrasound tactile display (AUTD), which can +present a noncontact tactile stimulus, is a promising +tool for haptics since it dose not require users to physically +contact with any devices [1]. An AUTD is a device with an +array of independently controllable ultrasound transducers [2], +[3], [4]. AUTDs can focus ultrasound waves on arbitrary +points in the air by controlling the phase of each transducer. +At the focus, a nonnegative force called acoustic radiation +force is generated [5], which conveys a noncontact tactile +stimulus onto human skin. This has been used in various +applications [1], such as human motion guidance [6], [7], [8], +touchable midair image displays [9], [10], [11], and remote +visual-haptic communication system [12], as the noncontact +stimulus by AUTD does not obstruct a user’s movement and +vision. +Recently, Morisaki et al. reported that AUTD can present +not only vibratory sensations but also static pressure sensa- +tions [13]. A static pressure sensation is indispensable for +Manuscript received xx; revised xx. +This work was supported in part by JSPS KAKENHI Grant Number +21J12305 and JST CREST JPMJCR18A2. +The +authors +are +with +the +Graduate +School +of +Frontier +Sciences, +the University of Tokyo, Kashiwa-shi, Chiba, 277-8561, Japan (e-mail: +morisaki@hapis.k.u-tokyo.ac.jp; Masahiro Fujiwara@ipc.i.u-tokyo.ac.jp; ya- +sutoshi makino@k.u-tokyo.ac.jp; hiroyuki shinoda@k.u-tokyo.ac.jp). +tactile displays because the sensation is the main component +of contact perception and is perceived with a higher resolution +than vibratory sensations [14]. However, in the conventional +ultrasound haptics technique, a static pressure sensation is +excluded from the presentable sensation of the AUTD. Ultra- +sound radiation force must be spatiotemporally modulated as it +is less than several tens of mN [15], [16], [17], [18], [19]. This +modulation has limited the tactile stimulus presented by the +AUTD to a vibratory sensation. Morisaki et al. addressed this +limitation and found that AUTD can present a static pressure +sensation by repeatedly moving an ultrasound focus along the +human skin at 5 Hz with a 0.2 mm spatial step width of the +focus movement [13]. 
The focal trajectory was a 6 mm line, +and the presentation location was a palm only. +In this study, we experimentally demonstrate that static +pressure sensation by ultrasound can be evoked even at a finger +pad. Moreover, we also show that by using a circular focal +trajectory, the pressure sensation can render a static contact +with a small convex surface on the finger pad. The radius +of the rendered convex surface is varied from 2 to 4 mm. +Rendering static contact with such a small convex surface has +been difficult for conventional ultrasound haptics techniques +because the perceptual resolution of vibratory sensations is +lower than that of static pressure sensations [14]. This contact +sensation rendering enables the noncontact tactile reproduction +of fine corrugated surfaces with a minimal spot size of several +millimeters, which is equivalent to a spatial resolution of 1 cm. +Previous studies rendered an uneven surface (e.g., bumps and +holes) using ultrasound. However, in these studies, the contact +sensation was not static as the finger and palm must be moved +to perceive the rendered surface. Howard et al. and Somei et +al. rendered an uneven surface by dynamically changing the +intensity or position of the ultrasound focus according to hand +movement [20], [21]. +In the experiment, an ultrasound focus rotating in a circle +at 5 Hz is presented to a finger pad, and the radius of +the trajectory is varied from 2 to 6 mm. We evaluate the +intensity of the vibratory and movement sensations of the +focus produced by the presented stimulus. We also evaluated +curvature of the tactile shape (i.e., flat, convex, or concave) +perceived on the finger pad. Moreover, we examine the optimal +ultrasound focus shape for creating a perfect static pressure +sensation. +arXiv:2301.11572v1 [cs.HC] 27 Jan 2023 + +BLANK +2 +II. RELATED WORKS +In this section, we summarize previous studies on point +stimulation and haptic shape rendering using ultrasound to +clarify the contribution of this study. +A. Vibratory and Static Pressure Sensation by Ultrasound +Two presentation methods have been employed to create +a single point vibrotactile sensation: Amplitude Modulation +(AM) [16] and Lateral Modulation (LM) [17], [18]. AM is a +stimulation method wherein the amplitude of the presented +radiation pressure is temporally modulated [16]. In LM, a +vibratory stimulus is presented by periodically moving a single +stimulus point (ultrasound focus) along the skin surface with +constant pressure [17], [18]. Takahashi et al. presented an LM +stimulus on the palm and showed that its perceptual threshold +was lower than that of the AM stimulus [17], [18]. The focal +trajectory used by Takahashi et al. was a line and circle +with representative lengths of a few millimeters. Additionally, +Spatiotemporal Modulation (STM) method have been used to +create a larger trajectory of a moving focus [19], [22]. Frier +et al. presented a circular STM stimulus with circumferences +of 4–10 cm, which were larger than that of the LM stimulus +presented by Takahashi et al [19], [17], [18]. +A static pressure sensation can be produced by a low- +frequency LM stimulus with a fine spatial step width of the +focal movement. Morisaki et al. presented a static pressure +sensation using an LM stimulus at 5 Hz with a step width of +0.2 mm [13]. The focal trajectory was a 6 mm line. 
Under +this condition, the vibratory sensation included in the LM +stimulus was suppressed to 5% in a subjective measure, and +the perceived intensity was comparable to 0.21 N physical +pushing force on average. The pressure sensation by ultra- +sound has been presented only on the palm, and whether the +pressure sensation can be evoked on a finger pad has not +been confirmed. This study aims to present the static pressure +sensation to a finger pad. Morisaki et al. and Somei et al. +presented a low frequency-fine step LM stimulus to a finger +pad. However, they did not evaluate its tactile feeling [11], +[21]. +B. Rendering Haptic Shape Using Ultrasound +Several studies have presented symbolic two-dimensional +haptic shapes, such as a line and circle on the palm using +AUTD. To render them, Korres and Eid used AM with multiple +foci [23]. Marti et al. and Hajas et al. used STM stimulus, +wherein the focal trajectory is the perimeter of the target +shape [24], [25]. Mulot et al. drew a curved line to the palm +using STM stimulus and evaluated whether its curvature can +be discriminated [26], [27]. +Moreover, AUTD has been used for tactile reproduction of +contact between 3D objects and hands. Inoue et al. presented a +3D static haptic image using an ultrasound standing wave [28]. +Long et al. presented multiple ultrasound foci on a palm and +rendered the contact shape with a virtual 3D object [29]. +Matsubayashi calculated the contact area between a finger +and a virtual 3D object and rendered this area to a finger +151.4 mm +192 mm +Fig. 1. +One unit of airborne ultrasound tactile display (AUTD) used in +this study. The one AUTD unit is equipped with 249 ultrasound transducers +operating at 40 kHz. +pad by presenting an LM stimulus whose focal trajectory was +the perimeter of the calculated contact area [30], [31]. These +studies aimed to reproduce the macroscopic shape of a 3D +object and did not reproduce contact shape with a fingertip- +sized small convex surface, as in this study. Moreover, static +pressure sensations were not presented in these studies. Long +et al. used AM at 200 Hz [29] and Matsubayashi et al. LM at +100 Hz [30], [31]. The static haptic image presented by Inoue +et al. was not modulated, but the participants had to keep +moving their hands to perceive its tactile sensations [28]. +Several studies have reproduced uneven surfaces using +AUTD. Howard et al. presented three tactile shapes to a palm: +bump, hole, and flat, by dynamically changing the intensity of +the ultrasound focus based on the hand position [20]. Somei +et al. presented a convex surface sensation to a finger pad +by changing the position of the ultrasound tactile stimulus +according to finger position [21]. Perceiving tactile shapes +using these methods require active finger or hand movement. +However, this study aims to perceive a static convex shape +while the fingers are stationary. +III. AIRBORNE ULTRASOUND TACTILE DISPLAY (AUTD) +In this study, we used Airborne Ultrasound Tactile Display +(AUTD) to present noncontact tactile stimuli. AUTD com- +prises an array of ultrasound transducers [2], [3], [4]. An +AUTD can focus ultrasound by controlling a phase of each +transducer, and focused ultrasound generates a nonnegative +pressure called acoustic radiation pressure. Ultrasound focus +can be narrowed to the diffraction limit. +Four AUTDs were used in the experiments. The one AUTD +unit was equipped with 249 ultrasound transducers operating +at 40 kHz (TA4010A1, NIPPON CERAMIC Co., Ltd.) 
[32], +[33]. Fig. 1 shows the AUTD. Each AUTD communicated via +the EtherCat protocol and was synchronously driven. +IV. STIMULUS DESIGN +A. Overview +In this section, we propose and describe two stimulus +methods: LM-single focus (LM-S) and LM-multi foci (LM- +M). In the subject experiment, we compared and evaluated +them to investigate whether they could render a static contact +sensation with convex surface. Fig. 2 shows a schematic of + +BLANK +3 +Step width +Focal position +Radius +Single focus +(LM-S) +Positions of multi foci +Multi foci +(LM-M) +Fig. 2. Schematic of LM-S (single focus) stimulus and LM-M (multi foci) +stimulus. In the LM-S, a single focus is periodically moved in a circle on a +finger pad. In the LM-M, multiple foci are simultaneously rotated. The foci +are placed along with the circular trajectory. +y +z +x +Midair image display +4 AUTD units +Depth camera +20 deg +x +y +z +Midair +image +deg +x +y +z +Depth +camera +Fig. 3. +Experimental equipment used in all subject experiments in this +study. This equipment presents a midair image marker. An ultrasound tactile +stimulus (LM stimulus) is presented when the finger of a participant touches +this marker. The image marker was used to indicate a finger positron to a +participant. +these stimulus methods. In LM-S, a single ultrasound focus is +periodically moved in a circle on the finger pad. The LM-S +has been used in previous studies [19], [18], [30]; however, +these studies have not evaluated whether this stimulus can +produce static pressure and static contact sensations. In the +LM-M stimulus, multiple ultrasound foci were simultaneously +presented and periodically moved in a circle. The foci were +placed along the circular focal trajectory so that they were in +close proximity. The distance between foci d was fixed at 3 +mm in the experiments. +In the experiment, the amplitude of each transducer was set +to maximum and the driving phase for presenting the LM-M +stimulus was calculated using a linear synthesis scheme. Let +φi ∈ RNtrans be the phase for presenting each focus in the +LM-M, and the phase for simultaneously presenting multiple +foci φ ∈ RNtrans is expressed as follows: +φ = +Nfocus +� +i +φi, +(1) +where i ∈ {1, ...Nfocus} is the index number of multiple foci, +Nfocus is the total number of multiple foci, and Ntrans is the +total number of transducers. +B. Formulation +First, we formulated a focus movement for the LM-S +stimulus. The focus position in LM-S rj ∈ R3 is given by +the following: +rj = rcnt + A(cos θjra + sin θjrb) + zjrc, +(2) +θj = 2π +N (j − 1), +(3) +where j ∈ {1, ...N} is the index of the focus position, N is the +total number of focus positions in one cycle of the LM, rcnt ∈ +R3 is the center of the focal trajectory, and A is the radius +of the trajectory. ra, rb, and rc are unit vectors whose origin +is at rcnt and parallel to the x-, y-, and z-axis, respectively. +The value of zj was determined using the measured finger +depth position. Based on these definitions, the step width of the +focus movement is dLM = 2πA +N . The index of focus position +j changes after the dwell time of focus td. Dwell time was +td = +1 +Nf LM if the frequency of the LM stimulus is f LM. +Second, we formulated the LM-M stimulus. Let ri,j ∈ R3 +be the focus position on the LM trajectory of the i-th focus +among the foci presented simultaneously. ri,j is chosen from +rj, which is the position discretized with dLM, such that the +motion step width of the multi foci is fixed to dLM. 
The +conversion from rj to ri,j is expressed as follows: +ri,j = rj+(i−1)l, +(4) +l = +� d +dLM +� +, +(5) +where l is the index number calculated from the distance +between the multi foci d. l is an integer, and the decimal point +is rounded down. +V. EXPERIMENTAL EQUIPMENT +In this section, we describe the experimental equipment that +presents a midair image with noncontact tactile feedback. This +equipment was used in all the subject experiments conducted +in this study. +A. System Overview +Fig. 3 shows the experimental equipment and its coordinate +system. This system consists of the four AUTDs, a midair +image display (ELF-SR1 Spatial Reality Display, SONY), and +a depth camera (RealSense D435, Intel) used to measure the +finger position. In the experiments, we used the midair image +display to instruct participants where to put their fingers. The +coordinate system is a right-handed system whose origin is +the center of the surface of the image display. +Throughout all the experiments, the system presented a +1 × 1 cm image marker at (0, 30, 30) mm. Ultrasound +waves were output from the AUTDs when participants placed +their fingertips on the marker. The presented ultrasound wave +refracted on the surface of the image display and then focused +on the finger pad. The position of the reflected ultrasound +focus rref ∈ R3 can be calculated as the mirror image of +the original focus position rorg ∈ R3 which is expressed as +follows: +rref = rorg + 2((rp − rorg) · n)n, +(6) +where n is the normal vector of the display surface (reflective +surface), and rorg is an arbitrary point on the display surface. + +国BLANK +4 +1. Detecting contact area +1 cm +2 cm +Detection area +Contact +area +2. Calculating LM trajectory +LM trajectory +Centroid +Calculating +Fig. 4. Algorithm for presenting LM stimulus. The size of the detection area +is 1 × 1 × 2 cm. The part of the finger within this detection area is measured +as the contact area, and the focal trajectory of the LM stimulus is calculated +using this area. The center of the LM is the centroid of the contact area. +deg +Force gauge +Fig. 5. +Setup for measuring radiation force. The tip of the force gauge +to which a 1.5 cm diameter acrylic disk was attached was placed at the +focal point. The force gauge was tilted 50 deg so that this disk opposed the +propagation direction of the ultrasound wave. +B. Algorithm for Presenting LM Stimulus +In the system, there are three processes for presenting a +circular LM stimulus to the finger pad of the participant. Fig. 4 +illustrates the presentation process. First, the system detects the +contact area between a participant’s finger and midair image +marker using a depth camera. The size of the image marker is +1×1×0.5 cm. However, to measure the contact position stably, +we used the area from the surface of the image marker to 2 cm +behind (1 × 1 × 2 cm) for the contact detection. Part of the +finger within the detection area was measured as the contact +area. Second, the system calculated the focal trajectory for the +circular LM stimulus using eq. 2 or eq. 4. The center position +of the LM stimulus rcnt was the centroid of the detected +contact area. The measured depth map of the fingertip surface +was used for the z-position of the focal trajectory. Third, +the focus is presented and moved along with the calculated +trajectory at a pre-specified frequency. In this algorithm, the +rcnt is asynchronously updated with the focus position at 90 +fps. 
A Gaussian filter was applied to the calculated rcnt of 10 +frames to suppress the measurement error of the depth camera. +C. Measurement of Radiation Force +We measured the radiation force of the focus presented by +the system and it was 0.02 N. Fig. 5 shows the measurement +setup. In this experiment, the tip of a force gauge, to which a +1.5 cm diameter acrylic disk was attached, was placed at the +focal point. This force gauge (IMADA ZTS-2N) can measure +forces up to 2 N with a resolution of 0.001 N. The force gauge +-20.0 +-10.0 +0.0 +10.0 +x [mm] +-20.0 +-10.0 +0.0 +10.0 +y [mm] +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +Normalized radiation pressure [-] +Fig. 6. Simulated radiation pressure distribution of focus. The white circle +with a diameter of 1.5 cm means the area for measuring the radiation force. +x [mm] +200 +150 +100 +50 +0 +50 +100 +150 +200 +y [mm] +200 +150 +100 +50 +0 +50 +100 +150 +200 +z [mm] +100 +50 +0 +50 +100 +150 +200 +250 +300 +Transducer +Focus +x [mm] +200 +150 +100 +50 +0 +50 +100 +150 +200 +z [mm] +100 +50 +0 +50 +100 +150 +200 +250 +300 +Fig. 7. AUTD setup for the simulation. +was tilted by 50 deg so that this disk opposes the propagation +direction of the ultrasound wave. The size of the acrylic disk +was determined based on the preliminary simulation such that +the disk size was larger than the focus size. Fig. 6 shows +the simulated radiation force distribution of a single focus +and Fig. 7 shows the ultrasound transducer setup used for +the simulation. The measurement range of the acrylic disk is +superimposed on the simulated result as a white circle. In this +simulation, the focus was generated at (0, 0, 250) mm, and the +reflection of the sound waves was not considered. The focus +position is shown in Fig. 7 as a cross mark. +VI. EXPERIMENT1: STATIONARITY AND SURFACE +CURVATURE +In this experiment, we evaluated the intensity of vibratory +and movement sensations in the LM stimulus and the per- +ceived curvature of the surface of the object produced by the +LM same stimulus (i.e., flat, convex, or concave). +A. Stimulus Condition +In this experiment, we presented the LM-M (LM-multi foci) +and LM-S (LM-single focus) stimuli at 5 Hz (as described in +Section IV). For comparison, an LM-S stimulus at 25 Hz was +also presented. The radii of LM stimuli A were 2, 3, 4, 5, and +6 mm. The motion step width dLM of the LM stimulus at 5 Hz +was as fine as 0.23 mm to elicit static pressure sensation [13]. + +BLANK +5 +mm +Flat +Convex +Concave +Radius +mm +Flat +Convex +Concave +Fig. 8. Example of the presented picture to evaluate the perceived curvature +(A = 2, 6 mm). The radius of the object was changed according to the radius +of the presented LM stimulus A. For one stimulus condition, the image of flat, +convex, and concave was sequentially presented in random order. Participants +reported the perceptual similarity between the perceived curvature and the +image. +Moreover, the step dLM at 25 Hz was 4 mm to avoid exceeding +the AUTD update limits (1 kHz) [33]. For the 5 Hz LM-M +stimuli, the number of simultaneously presented foci Nfocus +was four, and their placement interval d was 3 mm. All stimuli +were presented in random order. Each participant underwent +two sets of experiments. Therefore, 30 experimental trials were +conducted (i.e., 3 different LM stimuli × 5 stimulus radii × +2 sets = 30 experimental trials). +B. Procedure +Eight males (24–31 age) and two females (24 and 28 age) +participated in this experiment. 
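As a quick numerical check of the 5 Hz conditions above (step width dLM = 0.23 mm, radii A = 2-6 mm), the snippet below computes the number of focus positions per LM cycle N implied by dLM = 2πA/N, the per-position dwell time, and the resulting focus update rate; this back-of-the-envelope helper is ours and is not part of the paper:

import numpy as np

f_lm, d_lm = 5.0, 0.23                      # LM frequency [Hz] and step width [mm] at 5 Hz
for A in (2.0, 3.0, 4.0, 5.0, 6.0):         # trajectory radii [mm]
    N = int(round(2.0 * np.pi * A / d_lm))  # focus positions per LM cycle
    dwell_ms = 1e3 / (N * f_lm)             # dwell time per position [ms]
    print(f"A = {A:.0f} mm: N = {N}, dwell = {dwell_ms:.2f} ms, "
          f"focus update rate = {N * f_lm:.0f} Hz")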
+The experimental equipment was a visuo-tactile display +(Fig 3 and Section V). Participants were instructed to place +their index fingertips on the presented midair image marker. +The tactile stimulus was always presented while the fingertip +was touching the marker. +First, to evaluate the tactile sensation of the presented stim- +ulus, the participants answered the following two questions +with a seven-point Likert scale: +Q1. +How intensely did you perceive a vibratory sensation +in the presented stimulus? +Q2. +How intensely did you perceive the movement of the +stimulus position? +Participants were instructed to answer 1 if they perceived +no vibration or movement. In Q2, we evaluated whether the +participants noticed the circular focus movement of the LM. +Second, the participants evaluated the curvature rendered by +the LM stimulus on their finger pads. In this experiment, we +provided three typical shapes as references (i.e., flat, convex, +and concave). Three images corresponding to the three shapes +(Fig. 8) were presented to the participants as reference images. +To evaluate the perceived curvature, the participants re- +sponded to Q3 with a seven-point Likert scale. +Q3. +Does the stimulus shape perceived at your finger +pad match the situation illustrated in the reference +images? +For one stimulus condition, flat, convex, and concave reference +images (Fig. 8) were presented successively in random order. +Participants independently reported perceptual similarity to +each reference image (i.e., flat, convex, and concave). We +varied the radius of the illustrated object in the reference +images to match that of LM stimulus A. +Participants were instructed to ignore differences in the +perceived size between the image and tactile stimulus to +evaluate only the similarity of the perceived curvature (i.e., flat, +convex, and concave). The overall size of the finger sketch, +which was drawn in the reference image, was adjusted so that +its nail size matches the average Japanese adult nail length +(13.6 mm) [34]. +C. Results and Analysis +1) Stationarity: Box-and-whisker plots of the evaluated +vibratory sensations (answers to Q1) are shown in Fig. 9a. +The evaluated movement sensation (answers to Q2) is also +shown in Fig. 9b. If the data value v satisfies the following +conditions, the data are treated as an outlier: +� v ≤ v25 − 1.5 × IQR, +v ≥ v75 + 1.5 × IQR, +(7) +where v25 and v75 are the 25-percentile value and 75- +percentile value, respectively, and IQR is the interquartile +range. Outliers were plotted as white dots in the graphs. As +seven participants could not perceive the LM-M stimulus with +A = 2 mm, their answers were excluded. In total, 13 data of +the LM-M with A = 2 mm were excluded from each graph. +The results showed that the highest median value of the +vibratory sensation score was 7, and the stimulus condition +was LM-S at 25 Hz with A = 4, 5, 6 mm. The lowest median +value was 1, and the condition was LM-M at 5 Hz with A = +2 mm. The highest median value of the movement sensation +score was 6.5, and the stimulus condition was LM-S at 5 Hz +with A = 5 mm. The lowest median value was 1, and the +condition was LM-M at 5 Hz with A = 2 mm. +We conducted the Wilcoxon signed-rank test with Bonfer- +roni correction to compare the results between the stimulus +conditions (LM-M, LM-S at 5 Hz, and LM-S at 25 Hz) for +each stimulus radius A. The results of the LM-M stimulus +with A = 2 mm were excluded from the analysis. 
The test +results showed that at all values of A, the perceived vibratory +sensation of the LM-S at 25 Hz was significantly higher than +that of the other LM stimuli (p < 0.005). At A = 3 mm, +the vibratory sensation of the LM-S at 5 Hz was significantly +higher than that of the LM-M (p < 0.05). The results also +showed that at A = 3, 4, 5, 6 mm, the perceived movement +sensation of the LM-S at 25 Hz was significantly lower than +that of the other LM stimuli (p < 0.05). At A = 3, 4, 5 mm, +the movement sensation of the LM-M was significantly lower +than that of the LM-S at 5 Hz (p < 0.05). Fig. 9 shows these +pairs with significant differences as ”*” and ”**” for p < 0.05 +and p < 0.005, respectively. + +windowName (応答书L)windowName(応答windowName (応答书L)windowName +XwindowName (応答书L)windowName +XBLANK +6 +2 +3 +4 +5 +6 +Radius [mm] +1 +2 +3 +4 +5 +6 +7 +Percived vibration [-] +** +* +** +** +** +** +** +** +** +** +Multi at 5 Hz +Single at 5 Hz +Single at 25 Hz +(a) Evaluated vibration. +2 +3 +4 +5 +6 +Radius [mm] +1 +2 +3 +4 +5 +6 +7 +Percived movement [-] +* +** +** +* +* +** +** +* +** +** +** +** +Multi at 5 Hz +Single at 5 Hz +Single at 25 Hz +(b) Evaluated movement. +Fig. 9. +Evaluated perceptual stationarity of LM stimulus on a finger pad in experiment 1. Participants evaluated the perceived intensity of the vibratory +sensation and the focal movement sensation of the LM stimulus with a seven-point Likert scale. +2 +3 +4 +5 +6 +Radius [mm] +1 +2 +3 +4 +5 +6 +7 +Answered similarity [-] +** +** +** +** +* +Multi at 5 Hz +Flat +Convex +Concave +(a) LM-M at 5 Hz. +2 +3 +4 +5 +6 +Radius [mm] +1 +2 +3 +4 +5 +6 +7 +Answered similarity [-] +* +** +** +** +* +* +* +Single at 5 Hz +Flat +Convex +Concave +(b) LM-S at 5 Hz. +2 +3 +4 +5 +6 +Radius [mm] +1 +2 +3 +4 +5 +6 +7 +Answered similarity [-] +** +** +** +** +** +* +** +** +** +Single at 25 Hz +Flat +Convex +Concave +(c) LM-S at 25 Hz. +Fig. 10. Evaluated perceived curvature in experiment 1. The reference images +with flat, convex, and concave was presented, and the participants answered +perceptual similarity between the perceived tactile shape (curvature) and the +image with a seven-point Likert scale. +Moreover, we conducted the Friedman test with Bonferroni +correction using stimulus radius A and stimulus type (LM-M, +LM-S at 5 Hz, and LM-S at 25 Hz) as factors. The test results +showed that A and stimulus type had a significant effect on +both vibration and movement sensation (p < 0.0005). +2) Surface Curvature: Box-and-whisker plots of the evalu- +ated tactile shape (answers to Q3) with LM-M, LM-S at 5 Hz, +and LM-S at 25 Hz are shown in Fig. 10a, Fig. 10b, Fig. 10c, +respectively. 13 data of LM-M with A = 2 mm were excluded +(Section VI-B). +The highest median value for the flat score was 5.5, and the +condition was LM-S at 25 Hz with A = 4, 5 mm. The lowest +median was 3.5, and the condition was LM-S at 5Hz with +A = 6 mm. The highest median for the convex score was 5, +and the conditions were LM-M with A = 3, 4 mm and LM-S +at 5 Hz with A = 2, 3, 4 mm. The lowest median was 2, and +the condition was LM-S at 25 Hz with A = 5 mm. The highest +median value of the concave score was 5, and the condition +was LM-S at 5 Hz with A = 6 mm. The lowest median was +1.5, and the condition was LM-M with A = 3 mm. +We conducted the Wilcoxon signed-rank test with Bonfer- +roni correction to compare the score between the shapes (i.e., +flat, convex, concave) at each stimulus condition. 
The test +result showed that in the LM-M with A = 3, 4 mm, the flat +and convex scores were significantly higher than the concave +scores (p < 0.05). With A = 4 mm, the convex score was +significantly higher than the flat score (p < 0.005). In the LM- +S at 5 Hz with A = 2, 3 mm, the flat score was significantly +higher than the concave score (p < 0.05). For A = 2, 3, 4 mm, +the convex score was significantly higher than the concave +score (p < 0.05). With A = 2 mm, the convex score was +also significantly higher than the flat score (p < 0.05). In +the LM-S at 25 Hz, all flat scores were significantly higher +than the convex scores (p < 0.005). For A = 2, 3, 4, 5 mm, +the flat score was significantly higher than the concave score +(p < 0.05). +VII. EXPERIMENT2: PERCEIVED SIZE +In this experiment, we changed the radius of LM stimulus +A and evaluated the perceived stimulus size. + +BLANK +7 +mm +Radius +mm +mm +mm +mm +(1) +(2) +(3) +(4) +(5) +Fig. 11. Presented picture to evaluate the perceived size of the presented LM +stimulus. The five pictures with different radii (2, 3, 4, 5, and 6 mm) were +presented simultaneously. Participants selected one of these images showing +the circle whose size matches the perceived haptic size. +A. Procedure +Eight males (24–31 age) and (24 and 28 age) two females +participated in this experiment. +The experimental setup was the same as that used in Ex- +periment 1 (Fig. 3). The tactile stimulus was always presented +while the fingertip was touching the marker. The stimulus +conditions were identical to those used in Experiment 1, which +is explained in Section VI-A. 30 experimental trials were +conducted (i.e., 3 different LM stimuli × 5 stimulus radii × +2 sets = 30 experimental trials). +A real-time video of the participants’ fingers was presented +to them during the experiment. The screenshot of the presented +video is shown in Fig. 11. In this video, a blue circular +image corresponding to the trajectory of the LM stimulus is +superimposed on the finger pad of the participant. Participants +selected one of the videos showing a circle whose size matched +the perceived haptic size to evaluate the perceived size of the +presented stimulus. +The center of the circular image was changed in real-time to +match the center of the presented LM stimulus rcnt. The radii +of the circular images were 2, 3, 4, 5, and 6 mm, which were +the same as the radii of LM stimuli A used in this experiment. +Five videos with different radii were simultaneously presented +to the participant. This video was captured using an RGB +camera built into the depth camera. +B. Results and Analysis +Fig. 12 presents the confusion matrix for the stimulus size +identification results. The highest accuracy was 0.6, and the +stimulus condition was LM-S at 5 Hz with A = 5 mm. The +lowest accuracy was 0.15, and the condition was LM-S at 25 +Hz with A = 2, 4 mm. Chance rate in this experiment was +0.2, and accuracy exceeded the chance rate in all conditions, +except for the lowest-accuracy condition. +We compared the perceived size across the stimulus con- +dition (LM-M and LM-S at 5 Hz and 25 Hz, respectively). +Fig. 13 shows box-and-whisker plots of the perceived stimulus +sizes. The highest perceived stimulus radius was 5 mm, and +the condition was LM-S at 5 and 25 Hz with A = 5 mm and +all LM stimuli with A = 6 mm. The lowest radius was 2 mm, +and the conditions were LM-M with A = 2, 3 mm. 
We applied the Wilcoxon signed-rank test with Bonferroni correction to the results of the perceived size. The test results showed that the perceived radii of the LM-S at 25 Hz were significantly larger than those of the LM-M with A = 3, 4, 5 mm (p < 0.05) and of the LM-S at 5 Hz with A = 2, 4, 5 mm (p < 0.05). The results also showed that the perceived radius of the LM-S at 5 Hz was significantly larger than that of the LM-M with A = 3, 4 mm (p < 0.05).

VIII. EXPERIMENT 3: EQUIVALENT PHYSICAL STIMULUS
This experiment investigated the physically static force that is perceptually equivalent to the pressure sensation evoked by the LM stimulus at a finger pad. The physical force was presented by pushing a force gauge against the finger pad.

A. Setup and Stimulus
Fig. 14 illustrates the experimental setup. In this experiment, we used a force gauge whose z-position was automatically controlled by a 3-axis stage (QT-AMM3 and ALS-7013-G1MR, CHUO PRECISION INDUSTRIAL Co., Ltd.) together with the visual-haptic system (Fig. 3) used in the other experiments. The force gauge (IMADA ZTS-50N) can measure forces up to 50 N with a resolution of 0.01 N. The stimulus conditions were the same as those used in Experiment 1, as explained in Section VI-A.
A total of 30 experimental trials was conducted (3 LM stimuli × 5 stimulus radii A × 2 sets = 30 trials).

B. Procedure
Eight males (aged 23–28) and two females (aged 24 and 28) participated in this experiment.
Participants were instructed to place the index finger of their right hand on the marker presented by the midair image display. They were also instructed to place the index finger of their left hand such that the finger pad faced the tip of the force gauge; at this point, the force gauge did not touch the finger pad. The force gauge was fixed in midair in a horizontal orientation (Fig. 14). Participants grasped the aluminum handle and fixed their finger position by placing it in front of an acrylic auxiliary plate. A plastic cylinder with a radius of 1 cm was attached to the tip of the force gauge. The basal plane of the cylinder was beveled by 1 mm so that the participants did not perceive its edges. Participants wore headphones and listened to white noise during the experiment to avoid hearing the driving noise of the AUTD.
The force gauge was pressed against the finger pad of the participant by moving along the z-axis. After the force gauge reached the specified position (the initial pushing depth was 4 mm), an LM stimulus was presented to the finger pad of the right hand. After 2 s, the LM stimulus was stopped, and the force gauge returned to its initial position. The force gauge then immediately started pushing again, and the LM stimulus was presented again. This 2-s tactile stimulation was repeated automatically. In this experimental loop, participants compared the physical pushing force with the LM stimulus and orally reported the results.
Based on the participants' answers, we changed the pushing depth of the force gauge such that the perceived intensity of the two stimuli became the same. For example, the pushing depth in the second stimulus was shortened to weaken the pushing force if the participant answered that the pushing force in the first stimulus was stronger than the LM stimulus. When the participant reported that the intensities of the two stimuli were the same, the force gauge kept pushing the finger pad and the pushing force was recorded for 2 s. The median value of the recorded pushing-force time series was finally adopted as the measured force. After the measurement, the stimulus conditions were changed, and the same procedure was repeated. The adjustment resolution of the pushing depth was 0.25 mm, and the speed of the force gauge was 5 mm/s. The maximum number of pushing-depth adjustments was 20, and all participants completed the experiment within 30 min.
In the stimulus comparison, we instructed the participants to ignore the perception at the moment the LM stimulus and the pushing force were presented, so as to assess the steady-state perceived intensity of the LM stimulus.

Fig. 12. Confusion matrix of the stimulus size identification (answered radius versus presented radius [mm] for Multi at 5 Hz, Single at 5 Hz, and Single at 25 Hz). The chance rate in this experiment was 0.2.

Fig. 13. Evaluation result of the perceived size of the circular LM stimulus (perceived radius versus presented radius [mm] for Multi at 5 Hz, Single at 5 Hz, and Single at 25 Hz).

Fig. 14. Setup used to evaluate the perceived force (force gauge mounted on an automatic 3-axis stage). The force gauge was pressed against the finger pad of the left hand, and the LM stimulus was presented to the right finger. The pushing depth was automatically controlled by the 3-axis stage. Both stimuli were terminated after 2 s and automatically repeated. Participants compared the pushing force with the LM stimulus and orally reported the comparison results.

Fig. 15. Plastic cylinder (radius 1 cm) attached to the tip of the force gauge and used to push the finger pad. The basal plane of the cylinder was beveled by 1 mm.

C. Results and Analysis
In this experiment, the median value of the measured-force time series was adopted as the participant's answer. The maximum, median, and minimum standard deviations (SD) of the time series data were 0.522, 0.186, and 0, respectively, and each of these values came from a different participant. Fig. 16 shows the time series data whose SD is the maximum (0.522) and the data whose SD is the median (0.186).
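As a rough illustration of this data reduction, each 2-s force recording can be collapsed to its median (the adopted measured force) and its spread inspected as follows. The sampling rate and the synthetic traces are assumptions, not the recorded data.

# Minimal sketch (synthetic data): reduce each recorded 2-s force trace to its
# median value and report the spread (SD) across recordings, as used for Fig. 16.
import numpy as np

rng = np.random.default_rng(0)
fs = 100                                  # assumed sampling rate [Hz]
n = int(2.0 * fs)                         # samples in a 2-s recording window
traces = [0.5 + 0.05 * rng.standard_normal(n) for _ in range(30)]  # hypothetical trials [N]

adopted = np.array([np.median(f) for f in traces])   # per-trial measured force
spreads = np.array([np.std(f, ddof=1) for f in traces])
print(f"max SD = {spreads.max():.3f} N, median SD = {np.median(spreads):.3f} N")
print(f"adopted force, first trial = {adopted[0]:.2f} N")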
The median force of each time series is shown in Fig. 16 as a red line.
Fig. 17 shows box-and-whisker plots of the pushing forces. Outliers were calculated using eq. 7 and are plotted as white dots. One participant was unable to perceive the LM at 5 Hz with A = 2 mm; thus, this value was plotted as 0 N. Forces lower than 0.01 N, the lowest measurable force of the force gauge, were also plotted as 0 N. The results showed that the highest median value of the perceived force was 0.53 N, for LM-S at 25 Hz with A = 4 mm, and the lowest median value was 0.16 N, for LM-M at 5 Hz with A = 2 mm.

Fig. 16. Time series data of the measured force. We calculated the standard deviation (SD) of each recorded time series; the data with the maximum SD (0.522) and the data with the median SD (0.186) are plotted, together with the median value of each plotted time series (force [N] over the 2-s recording).

Fig. 17. Physically static pushing force perceptually equal to the intensity of the LM stimulus (perceived force [N] versus stimulus radius [mm] for Multi at 5 Hz, Single at 5 Hz, and Single at 25 Hz).

We also conducted the Wilcoxon signed-rank test with Bonferroni correction to compare the perceived force between the stimulus conditions (LM-M, LM-S at 5 Hz, and LM-S at 25 Hz) at each stimulus radius A. The test results showed that with A = 2, 3, 4, 6 mm, the perceived force of the LM-S stimulus at 25 Hz was significantly higher than that of the LM-M stimulus (p < 0.05). For A = 4, 6 mm, the perceived force of the LM-S stimulus at 25 Hz was significantly higher than that of the LM-S stimulus at 5 Hz (p < 0.05). For A = 2, 3 mm, the perceived force of the LM-S stimulus at 5 Hz was significantly higher than that of the LM-M stimulus (p < 0.05).

IX. DISCUSSION
A. Static Pressure Sensation at Finger Pad
The results of Experiment 1 showed that LM at 5 Hz (both LM-M and LM-S) can produce a non-vibratory pressure sensation on a finger pad. Moreover, with stimulus radii of A = 2, 3 mm, the movement sensations were barely perceivable, and the pressure sensation was well static. The vibration sensation of the LM stimulus at 5 Hz was 4 or less in all conditions except LM-M with A = 5 mm, and it was significantly lower than that of the LM-S at 25 Hz. For A = 2, 3 mm, the movement sensations of the LM-M were 2 or less.
The results of Experiment 3 also showed that the perceived intensity of the pressure sensation on the finger pad was perceptually comparable to a physical contact force of 0.16 N or more on average. With the lowest vibration and movement sensation (LM-M with A = 3 mm), the perceived force was 0.24 N, which was 12 times the radiation pressure at the focus presented in the setup.
However, in Experiment 3, extremely low and high forces were reported, causing a large variance. For the LM-M with A = 3 mm, the minimum and maximum values were 0 and 1.22 N, respectively. Note that the participant who answered 0 N could perceive the LM-M stimulus with A = 3 mm; since the answered equivalent force was less than 0.01 N, the measurable minimum force of the force gauge, the force was recorded as 0 N.
This large difference in perceived force could be attributed to individual differences in the adaptation speed of the tactile receptors to the pushing stimulus presented by the force gauge. The pushing force is static, and the perceived intensity of such a stimulus gradually weakens with stimulus duration owing to the adaptation of SA-I (slowly adapting type I) tactile receptors [35]. In Experiment 3, the contact time with the force gauge was limited to 2 s to prevent this adaptation effect, and participants were instructed to ignore the perception at the moment of contact. However, if the adaptation speed differs greatly among participants, there could still be a large difference in the answered equivalent pushing force even under this control. For example, we considered that the adaptation speed of the participants who answered an extremely high force was fast: when the adaptation speed is fast, the perceived intensity of the contact force rapidly weakens over the 2-s period, resulting in a high pushing force being reported as the equivalent force. Conversely, the adaptation speed of the participants answering an extremely low force could be slow. The evaluation of individual differences in adaptation speed is important future work.
The experimental results also indicated that the perceived intensity of the LM-M stimulus with A = 2 mm was extremely weak. In Experiments 1 and 2, eight participants could not perceive the LM-M stimulus with A = 2 mm. We considered that this weakness arises because the circumference of the 2 mm-radius trajectory and the length of the curved line-shaped stimulus distribution used in LM-M (9 mm) were almost the same. As an exception, in Experiment 3, only one participant could not perceive the LM-M stimulus with A = 2 mm, and the average perceived force was 0.16 N. This difference could be attributed to the difference in the presentation time of the LM stimulus [35]. In Experiment 3, the stimulus duration was 2 s, whereas in Experiments 1 and 2 the participants were presented with the LM stimulus continuously, without any time limit. Therefore, in most participants in Experiments 1 and 2, their SA-I tactile receptors completely adapted to the LM-M stimulus, and they could not perceive it.

Fig. 18. Simulated time-averaged radiation pressure distribution (normalized) for LM-M and LM-S with A = 2, 3, 4, 5, and 6 mm (x, y in mm).

Fig. 19. Simulated distribution of the 5 Hz power spectrum of the time variation of the radiation pressure produced by LM-M and LM-S at 5 Hz (A = 3 mm; x, y in mm). The power spectrum distribution was obtained by simulating the time variation of the radiation pressure at each point in the stimulus area and Fourier transforming the time variation. These values were normalized.
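The per-point extraction of the 5 Hz component described for Fig. 19 can be sketched as follows. The sampled pressure trace and sampling rate below are assumptions; the actual distributions were obtained from the acoustic simulation.

# Minimal sketch (synthetic trace): power of the 5 Hz component of the radiation
# pressure variation at one skin point, via a discrete Fourier transform.
import numpy as np

fs = 1000                                  # assumed sampling rate [Hz]
t = np.arange(0, 2.0, 1 / fs)              # 2-s analysis window
f_lm = 5                                   # lateral-modulation frequency [Hz]
p = 0.5 + 0.5 * np.cos(2 * np.pi * f_lm * t)   # hypothetical nonnegative pressure trace

spectrum = np.abs(np.fft.rfft(p - p.mean())) ** 2   # power spectrum of the variation
freqs = np.fft.rfftfreq(p.size, d=1 / fs)
power_5hz = spectrum[np.argmin(np.abs(freqs - f_lm))]
print(f"power at {f_lm} Hz: {power_5hz:.1f} (arbitrary units)")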
B. Perceived Curvature
In Experiments 1 and 2, since eight participants could not perceive the LM-M stimulus with a radius of 2 mm, we excluded this condition from the following discussion.
The results of Experiments 1 and 2 suggest that a circular LM stimulus with A = 2–4 mm can render a contact sensation with a convex surface with a radius of 2–4 mm. As described in Section IX-A, particularly for A = 2, 3 mm, the contact sensation was well static. In the LM at 5 Hz with A = 2–4 mm, the convex score was significantly higher than the concave score (p < 0.05). In LM-M with A = 4 mm and LM-S with A = 2, 4 mm, the convex score was significantly higher than the flat score (p < 0.05). The perceived radii for LM-M with A = 3 mm and LM-S with A = 4 mm were 2 and 4 mm, respectively. The participants' comments also suggest that a convex sensation was rendered: four participants commented that they sometimes felt in contact with sharp or rounded objects. Based on the authors' subjective view, we felt the LM-M and LM-S stimuli at 5 Hz with A = 3 mm as a contact sensation with a rounded convex surface.
However, in some cases, participants found it difficult to determine whether the perceived contact shape was convex or flat; two of the participants commented that this determination was difficult. Moreover, no significant difference was observed between the convex scores for the LM-S at 5 Hz with A = 3 mm and the LM-M with A = 3 mm. In the future, we will quantitatively evaluate the curvature of the perceived surface and explore a control method for the curvature.
In the LM at 5 Hz with A = 2–4 mm, all concave scores were less than 2, and a concave sensation was not perceived. We considered that the periphery of the LM stimulus was hardly perceived in this radius range, as three participants commented that they gave high concave scores when they strongly perceived the perimeter of the stimulus. The characteristics of the time-averaged radiation pressure distribution of the LM stimulus were also consistent with this consideration. Fig. 18 shows the simulated time-averaged pressure distribution; the simulation setup is the same as that shown in Fig. 7. The results indicate that the periphery of the LM stimulus becomes the peak of the time-averaged radiation pressure only above a radius of 5 mm, where the concave score is high.
Finally, we compared the perceived curvature with the 5 Hz vibration intensity distribution produced by the LM stimulus at 5 Hz. Fig. 19 shows the simulated distribution of the 5 Hz vibration intensity (power spectrum at 5 Hz) produced by LM-M and LM-S at 5 Hz with A = 3 mm. The power spectrum distribution was obtained by simulating the time variation of the radiation pressure at each point in the stimulus area and Fourier transforming the time variation; the simulation setup is the same as that shown in Fig. 7. The simulation results showed that the physical intensity of the 5 Hz vibration was highest on the focal orbits and does not match the perceived stimulus shape (perceived curvature).
With A = 3 mm, the LM-M and LM-S at 5 Hz were perceived as contact with a convex surface. However, even under these conditions, the peaks of the vibration intensity formed a circle, which corresponds to a concave contact shape rather than a convex one. In the future, we will investigate the relationship between the perceived curvature and the vibration intensity distribution by measuring or simulating the skin displacement generated by the LM stimulus, as in previous studies [36], [37].

C. Comparison of LM-M and LM-S at 5 Hz
The results of Experiment 1 showed that the curved line-shaped pressure distribution, which consists of four ultrasound foci and is used in LM-M, can suppress the movement sensation of low-frequency LM stimuli. With A = 3, 4, 5 mm, the movement sensation of the LM-M was significantly lower than that of the LM-S at 5 Hz. We considered that the reason for this suppression of motion perception is that the simultaneously stimulated area of LM-M is wider than that of LM-S.
The LM-M stimulus was perceived to be smaller than the LM-S stimulus at 5 Hz. For A = 3, 4 mm, the perceived size of LM-M was significantly smaller than that of LM-S at 5 Hz (p < 0.05). The trend in perceived size is consistent with the difference in the size of the time-averaged radiation pressure distribution: the simulation results shown in Fig. 18 indicate that the time-averaged distributions of LM-M with A = 3, 4 mm were smaller than those of LM-S.
In terms of the vibratory sensation and the perceived shape (curvature), there were no large differences between LM-M and LM-S at 5 Hz. Except for A = 3 mm, there were no significant differences in the vibration sensations. For A = 3, 4, 5 mm, the convex scores were higher than the flat and concave scores for both LM-M and LM-S at 5 Hz.

D. Comparison of Movement Sense Between LM Frequencies
The results of Experiment 1 showed that the movement sensation of the LM at 25 Hz was lower than that of the LM at 5 Hz. At A = 3, 4, 5, 6 mm, the movement sensation of the LM-S at 25 Hz was significantly lower than that of LM-M and LM-S at 5 Hz. We considered that this was because the focus speed at f_LM = 25 Hz was too fast for the participants to perceive a movement distinct from the vibration. This result is consistent with a previous study [38], which presented circular STM stimuli with diameters of 4–7 cm on the palm and found that the focal movement could not be perceived when the movement speed of the focus was above 18 Hz.
The results also showed that rendering a convex surface is difficult with the vibratory sensation produced by focused ultrasound. As the vibration score of LM-S at 25 Hz was 6 or higher, this stimulus evoked a vibratory sensation in the experiments. In the LM at 25 Hz, the flat score was the highest for all radii and was significantly greater than the convex score. One participant commented that the contact shape often felt flat when vibration was perceived.

X. CONCLUSION
In this study, we verified that an ultrasound radiation pressure distribution that spatiotemporally varies at 5 Hz can provide a static pressure sensation on a finger pad. We also demonstrated that the pressure sensation on the finger pad was perceived as a static contact sensation with a convex surface. In the experiment, four ultrasound focal points were presented on the finger pad of the participant and were simultaneously rotated in a circle at 5 Hz.
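For concreteness, the circular focal trajectory of the single-focus (LM-S) stimulus can be generated as in the sketch below. The focus-update rate is an assumption; the actual hardware, step width, and the four-focus LM-M arrangement are described earlier in the paper.

# Minimal sketch (assumed update rate): focal positions of a circular LM
# trajectory with orbit radius A, rotating at the 5 Hz modulation frequency.
import numpy as np

A = 3e-3              # orbit radius [m] (the 3 mm condition)
f_lm = 5              # rotation frequency [Hz]
update_rate = 1000    # assumed focus-update rate [Hz]

t = np.arange(0, 1 / f_lm, 1 / update_rate)        # one rotation period
theta = 2 * np.pi * f_lm * t
focus_xy = np.stack([A * np.cos(theta), A * np.sin(theta)], axis=1)

step = np.linalg.norm(np.diff(focus_xy, axis=0), axis=1)
print(f"{focus_xy.shape[0]} focal positions per period, "
      f"step width ≈ {step.mean() * 1e3:.2f} mm per update")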
When the radius of the focal trajectory was 3 mm, the perceived vibration and movement sensations were the lowest, 1.5 and 2 out of 7 on average, respectively. The perceived intensity of this evoked pressure sensation was equivalent to a 0.24 N physically constant force lasting for 2 s, which is 12 times the physically presented radiation force at the focus. Under the most static condition, the pressure sensation was perceived as a contact sensation with a convex surface with a radius of 2 mm; the average perceptual similarity was 5 out of 7.
From these results, we conclude that focused ultrasound can render a static contact sensation with a small convex surface at a finger pad. This contact sensation rendering enables the noncontact tactile reproduction of a static, fine uneven surface. In the future, we will investigate curvature control of the rendered convex surface.

REFERENCES
[1] I. Rakkolainen, E. Freeman, A. Sand, R. Raisamo, and S. Brewster, "A survey of mid-air ultrasound haptics and its applications," IEEE Transactions on Haptics, 2020.
[2] T. Iwamoto and H. Shinoda, "Ultrasound tactile display for stress field reproduction-examination of non-vibratory tactile apparent movement," in First Joint Eurohaptics Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems. World Haptics Conference. IEEE, 2005, pp. 220-228.
[3] T. Hoshi, M. Takahashi, T. Iwamoto, and H. Shinoda, "Noncontact tactile display based on radiation pressure of airborne ultrasound," IEEE Transactions on Haptics, vol. 3, no. 3, pp. 155-165, 2010.
[4] T. Carter, S. A. Seah, B. Long, B. Drinkwater, and S. Subramanian, "Ultrahaptics: multi-point mid-air haptic feedback for touch surfaces," in Proceedings of the 26th Annual ACM Symposium on User Interface Software and Technology. ACM, 2013, pp. 505-514.
[5] K. Yosioka and Y. Kawasima, "Acoustic radiation pressure on a compressible sphere," Acta Acustica united with Acustica, vol. 5, no. 3, pp. 167-173, 1955.
[6] S. Suzuki, M. Fujiwara, Y. Makino, and H. Shinoda, "Midair hand guidance by an ultrasound virtual handrail," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 271-276.
[7] A. Yoshimoto, K. Hasegawa, Y. Makino, and H. Shinoda, "Midair haptic pursuit," IEEE Transactions on Haptics, vol. 12, no. 4, pp. 652-657, 2019.
[8] E. Freeman, D.-B. Vo, and S. Brewster, "Haptiglow: Helping users position their hands for better mid-air gestures and ultrasound haptic feedback," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 289-294.
[9] Y. Monnai, K. Hasegawa, M. Fujiwara, K. Yoshino, S. Inoue, and H. Shinoda, "Haptomime: mid-air haptic interaction with a floating virtual screen," in Proceedings of the 27th Annual ACM Symposium on User Interface Software and Technology, 2014, pp. 663-667.
[10] T. Romanus, S. Frish, M. Maksymenko, W. Frier, L. Corenthy, and O. Georgiou, "Mid-air haptic bio-holograms in mixed reality," in 2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct). IEEE, 2019, pp. 348-352.
[11] T. Morisaki, M. Fujiwara, Y. Makino, and H. Shinoda, "Midair haptic-optic display with multi-tactile texture based on presenting vibration and pressure sensation by ultrasound," in SIGGRAPH Asia 2021 Emerging Technologies, 2021, pp. 1-2.
[12] Y. Makino, Y. Furuyama, S. Inoue, and H.
Shinoda, "Haptoclone (haptic-optical clone) for mutual tele-environment by real-time 3D image transfer with midair force feedback," in CHI, 2016, pp. 1980-1990.
[13] T. Morisaki, M. Fujiwara, Y. Makino, and H. Shinoda, "Non-vibratory pressure sensation produced by ultrasound focus moving laterally and repetitively with fine spatial step width," IEEE Transactions on Haptics, vol. 15, no. 2, pp. 441-450, 2021.
[14] R. S. Johansson and Å. B. Vallbo, "Tactile sensory coding in the glabrous skin of the human hand," Trends in Neurosciences, vol. 6, pp. 27-32, 1983.
[15] S. J. Bolanowski Jr, G. A. Gescheider, R. T. Verrillo, and C. M. Checkosky, "Four channels mediate the mechanical aspects of touch," The Journal of the Acoustical Society of America, vol. 84, no. 5, pp. 1680-1694, 1988.
[16] K. Hasegawa and H. Shinoda, "Aerial vibrotactile display based on multiunit ultrasound phased array," IEEE Transactions on Haptics, vol. 11, no. 3, pp. 367-377, 2018.
[17] R. Takahashi, K. Hasegawa, and H. Shinoda, "Lateral modulation of midair ultrasound focus for intensified vibrotactile stimuli," in International Conference on Human Haptic Sensing and Touch Enabled Computer Applications. Springer, 2018, pp. 276-288.
[18] R. Takahashi, K. Hasegawa, and H. Shinoda, "Tactile stimulation by repetitive lateral movement of midair ultrasound focus," IEEE Transactions on Haptics, vol. 13, no. 2, pp. 334-342, 2019.
[19] W. Frier, D. Ablart, J. Chilles, B. Long, M. Giordano, M. Obrist, and S. Subramanian, "Using spatiotemporal modulation to draw tactile patterns in mid-air," in International Conference on Human Haptic Sensing and Touch Enabled Computer Applications. Springer, 2018, pp. 270-281.
[20] T. Howard, G. Gallagher, A. Lécuyer, C. Pacchierotti, and M. Marchal, "Investigating the recognition of local shapes using mid-air ultrasound haptics," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 503-508.
[21] Z. Somei, T. Morisaki, Y. Toide, M. Fujiwara, Y. Makino, and H. Shinoda, "Spatial resolution of mesoscopic shapes presented by airborne ultrasound," in International Conference on Human Haptic Sensing and Touch Enabled Computer Applications. Springer, 2022, pp. 243-251.
[22] W. Frier, D. Pittera, D. Ablart, M. Obrist, and S. Subramanian, "Sampling strategy for ultrasonic mid-air haptics," in Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, 2019, pp. 1-11.
[23] G. Korres and M. Eid, "Haptogram: Ultrasonic point-cloud tactile stimulation," IEEE Access, vol. 4, pp. 7758-7769, 2016.
[24] D. Hajas, D. Pittera, A. Nasce, O. Georgiou, and M. Obrist, "Mid-air haptic rendering of 2D geometric shapes with a dynamic tactile pointer," IEEE Transactions on Haptics, vol. 13, no. 4, pp. 806-817, 2020.
[25] P. Marti, O. Parlangeli, A. Recupero, S. Guidi, and M. Sirizzotti, "Mid-air haptics for shape recognition of virtual objects," Ergonomics, pp. 1-19, 2021.
[26] L. Mulot, G. Gicquel, Q. Zanini, W. Frier, M. Marchal, C. Pacchierotti, and T. Howard, "Dolphin: A framework for the design and perceptual evaluation of ultrasound mid-air haptic stimuli," in ACM Symposium on Applied Perception 2021, 2021, pp. 1-10.
[27] L. Mulot, G. Gicquel, W. Frier, M. Marchal, C. Pacchierotti, and T. Howard, "Curvature discrimination for dynamic ultrasound mid-air haptic stimuli," in 2021 IEEE World Haptics Conference (WHC). IEEE, 2021, pp. 1145-1145.
[28] S. Inoue, Y. Makino, and H.
Shinoda, "Active touch perception produced by airborne ultrasonic haptic hologram," in 2015 IEEE World Haptics Conference (WHC). IEEE, 2015, pp. 362-367.
[29] B. Long, S. A. Seah, T. Carter, and S. Subramanian, "Rendering volumetric haptic shapes in mid-air using ultrasound," ACM Transactions on Graphics (TOG), vol. 33, no. 6, pp. 1-10, 2014.
[30] A. Matsubayashi, Y. Makino, and H. Shinoda, "Direct finger manipulation of 3D object image with ultrasound haptic feedback," in Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, 2019, pp. 1-11.
[31] A. Matsubayashi, H. Oikawa, S. Mizutani, Y. Makino, and H. Shinoda, "Display of haptic shape using ultrasound pressure distribution forming cross-sectional shape," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 419-424.
[32] S. Inoue, Y. Makino, and H. Shinoda, "Scalable architecture for airborne ultrasound tactile display," in International AsiaHaptics Conference. Springer, 2016, pp. 99-103.
[33] S. Suzuki, S. Inoue, M. Fujiwara, Y. Makino, and H. Shinoda, "AUTD3: Scalable airborne ultrasound tactile display," IEEE Transactions on Haptics, 2021.
[34] National Institute of Advanced Industrial Science and Technology, "ICAM: Identification code of anthropometric measurements," 2011, https://www.airc.aist.go.jp/dhrt/hand/data/list.html.
[35] A. B. Vallbo, R. S. Johansson et al., "Properties of cutaneous mechanoreceptors in the human hand related to touch sensation," Hum. Neurobiol., vol. 3, no. 1, pp. 3-14, 1984.
[36] J. Chilles, W. Frier, A. Abdouni, M. Giordano, and O. Georgiou, "Laser Doppler vibrometry and FEM simulations of ultrasonic mid-air haptics," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 259-264.
[37] W. Frier, A. Abdouni, D. Pittera, O. Georgiou, and R. Malkin, "Simulating airborne ultrasound vibrations in human skin for haptic applications," IEEE Access, vol. 10, pp. 15443-15456, 2022.
[38] E. Freeman and G. Wilson, "Perception of ultrasound haptic focal point motion," in Proceedings of the 2021 International Conference on Multimodal Interaction, 2021, pp. 697-701.

Tao Morisaki
Tao Morisaki has been a Ph.D. student at the Graduate School of Frontier Sciences, the University of Tokyo, since 2020. He received the M.S. degree from the Department of Complexity Science and Engineering, the University of Tokyo, Chiba, Japan, in 2020. His research interests include haptics, ultrasound midair haptics, and human-computer interaction. He is a member of VRSJ.

Masahiro Fujiwara
He is a project assistant professor at the Graduate School of Frontier Sciences, the University of Tokyo, Japan. He received the B.S. degree in Engineering and the M.S. and Ph.D. degrees in Information Science and Technology from the University of Tokyo in 2010, 2012, and 2015, respectively. His research interests include information physics, haptics, non-contact sensing, and application systems related to them. He is a member of IEEE.

Yasutoshi Makino
Yasutoshi Makino is an associate professor in the Department of Complexity Science and Engineering at the University of Tokyo. He received his Ph.D. in Information Science and Technology from the University of Tokyo in 2007. He worked as a researcher for two years at the University of Tokyo and as an assistant professor at Keio University from 2009 to 2013. In 2013 he moved to the University of Tokyo as a lecturer, and he has been an associate professor since 2017. His research interests include haptic interactive systems.
Hiroyuki Shinoda
Hiroyuki Shinoda is a Professor at the Graduate School of Frontier Sciences, the University of Tokyo. After receiving a Ph.D. in engineering from the University of Tokyo, he was an Associate Professor at Tokyo University of Agriculture and Technology from 1995 to 1999. He was a Visiting Scholar at UC Berkeley in 1999 and was an Associate Professor at the University of Tokyo from 2000 to 2012. His research interests include information physics, haptics, mid-air haptics, two-dimensional communication, and their application systems. He is a member of SICE, IEEJ, RSJ, JSME, VRSJ, IEEE, and ACM.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The perceived intensity of the static contact sensation was equivalent to a physical contact force of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='24 N on average, which was 12 times the radiation force physically applied to the skin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Index Terms—Static contact sensation, convex surface, midair haptics, focused ultrasound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' INTRODUCTION A IRBORNE ultrasound tactile display (AUTD), which can present a noncontact tactile stimulus, is a promising tool for haptics since it dose not require users to physically contact with any devices [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' An AUTD is a device with an array of independently controllable ultrasound transducers [2], [3], [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' AUTDs can focus ultrasound waves on arbitrary points in the air by controlling the phase of each transducer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' At the focus, a nonnegative force called acoustic radiation force is generated [5], which conveys a noncontact tactile stimulus onto human skin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' This has been used in various applications [1], such as human motion guidance [6], [7], [8], touchable midair image displays [9], [10], [11], and remote visual-haptic communication system [12], as the noncontact stimulus by AUTD does not obstruct a user’s movement and vision.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Recently, Morisaki et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' reported that AUTD can present not only vibratory sensations but also static pressure sensa- tions [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' A static pressure sensation is indispensable for Manuscript received xx;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' revised xx.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' This work was supported in part by JSPS KAKENHI Grant Number 21J12305 and JST CREST JPMJCR18A2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The authors are with the Graduate School of Frontier Sciences, the University of Tokyo, Kashiwa-shi, Chiba, 277-8561, Japan (e-mail: morisaki@hapis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='u-tokyo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='jp;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Masahiro Fujiwara@ipc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='u-tokyo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='jp;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' ya- sutoshi makino@k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='u-tokyo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='jp;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' hiroyuki shinoda@k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='u-tokyo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='jp).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' tactile displays because the sensation is the main component of contact perception and is perceived with a higher resolution than vibratory sensations [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' However, in the conventional ultrasound haptics technique, a static pressure sensation is excluded from the presentable sensation of the AUTD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Ultra- sound radiation force must be spatiotemporally modulated as it is less than several tens of mN [15], [16], [17], [18], [19].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' This modulation has limited the tactile stimulus presented by the AUTD to a vibratory sensation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Morisaki et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' addressed this limitation and found that AUTD can present a static pressure sensation by repeatedly moving an ultrasound focus along the human skin at 5 Hz with a 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='2 mm spatial step width of the focus movement [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The focal trajectory was a 6 mm line, and the presentation location was a palm only.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' In this study, we experimentally demonstrate that static pressure sensation by ultrasound can be evoked even at a finger pad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Moreover, we also show that by using a circular focal trajectory, the pressure sensation can render a static contact with a small convex surface on the finger pad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The radius of the rendered convex surface is varied from 2 to 4 mm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Rendering static contact with such a small convex surface has been difficult for conventional ultrasound haptics techniques because the perceptual resolution of vibratory sensations is lower than that of static pressure sensations [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' This contact sensation rendering enables the noncontact tactile reproduction of fine corrugated surfaces with a minimal spot size of several millimeters, which is equivalent to a spatial resolution of 1 cm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Previous studies rendered an uneven surface (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=', bumps and holes) using ultrasound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' However, in these studies, the contact sensation was not static as the finger and palm must be moved to perceive the rendered surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Howard et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' and Somei et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' rendered an uneven surface by dynamically changing the intensity or position of the ultrasound focus according to hand movement [20], [21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' In the experiment, an ultrasound focus rotating in a circle at 5 Hz is presented to a finger pad, and the radius of the trajectory is varied from 2 to 6 mm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' We evaluate the intensity of the vibratory and movement sensations of the focus produced by the presented stimulus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' We also evaluated curvature of the tactile shape (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=', flat, convex, or concave) perceived on the finger pad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Moreover, we examine the optimal ultrasound focus shape for creating a perfect static pressure sensation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='11572v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='HC] 27 Jan 2023 BLANK 2 II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' RELATED WORKS In this section, we summarize previous studies on point stimulation and haptic shape rendering using ultrasound to clarify the contribution of this study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Vibratory and Static Pressure Sensation by Ultrasound Two presentation methods have been employed to create a single point vibrotactile sensation: Amplitude Modulation (AM) [16] and Lateral Modulation (LM) [17], [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' AM is a stimulation method wherein the amplitude of the presented radiation pressure is temporally modulated [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' In LM, a vibratory stimulus is presented by periodically moving a single stimulus point (ultrasound focus) along the skin surface with constant pressure [17], [18].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Takahashi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented an LM stimulus on the palm and showed that its perceptual threshold was lower than that of the AM stimulus [17], [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The focal trajectory used by Takahashi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' was a line and circle with representative lengths of a few millimeters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Additionally, Spatiotemporal Modulation (STM) method have been used to create a larger trajectory of a moving focus [19], [22].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Frier et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented a circular STM stimulus with circumferences of 4–10 cm, which were larger than that of the LM stimulus presented by Takahashi et al [19], [17], [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' A static pressure sensation can be produced by a low- frequency LM stimulus with a fine spatial step width of the focal movement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Morisaki et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented a static pressure sensation using an LM stimulus at 5 Hz with a step width of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='2 mm [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The focal trajectory was a 6 mm line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Under this condition, the vibratory sensation included in the LM stimulus was suppressed to 5% in a subjective measure, and the perceived intensity was comparable to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='21 N physical pushing force on average.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The pressure sensation by ultra- sound has been presented only on the palm, and whether the pressure sensation can be evoked on a finger pad has not been confirmed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' This study aims to present the static pressure sensation to a finger pad.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Morisaki et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' and Somei et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented a low frequency-fine step LM stimulus to a finger pad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' However, they did not evaluate its tactile feeling [11], [21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Rendering Haptic Shape Using Ultrasound Several studies have presented symbolic two-dimensional haptic shapes, such as a line and circle on the palm using AUTD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' To render them, Korres and Eid used AM with multiple foci [23].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Marti et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' and Hajas et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' used STM stimulus, wherein the focal trajectory is the perimeter of the target shape [24], [25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Mulot et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' drew a curved line to the palm using STM stimulus and evaluated whether its curvature can be discriminated [26], [27].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Moreover, AUTD has been used for tactile reproduction of contact between 3D objects and hands.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Inoue et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented a 3D static haptic image using an ultrasound standing wave [28].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Long et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented multiple ultrasound foci on a palm and rendered the contact shape with a virtual 3D object [29].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Matsubayashi calculated the contact area between a finger and a virtual 3D object and rendered this area to a finger 151.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='4 mm 192 mm Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' One unit of airborne ultrasound tactile display (AUTD) used in this study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The one AUTD unit is equipped with 249 ultrasound transducers operating at 40 kHz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' pad by presenting an LM stimulus whose focal trajectory was the perimeter of the calculated contact area [30], [31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' These studies aimed to reproduce the macroscopic shape of a 3D object and did not reproduce contact shape with a fingertip- sized small convex surface, as in this study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Moreover, static pressure sensations were not presented in these studies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Long et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' used AM at 200 Hz [29] and Matsubayashi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' LM at 100 Hz [30], [31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' The static haptic image presented by Inoue et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' was not modulated, but the participants had to keep moving their hands to perceive its tactile sensations [28].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Several studies have reproduced uneven surfaces using AUTD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Howard et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented three tactile shapes to a palm: bump, hole, and flat, by dynamically changing the intensity of the ultrasound focus based on the hand position [20].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Somei et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' presented a convex surface sensation to a finger pad by changing the position of the ultrasound tactile stimulus according to finger position [21].' 
Perceiving tactile shapes using these methods requires active finger or hand movement. However, this study aims at perceiving a static convex shape while the fingers are stationary.

III. AIRBORNE ULTRASOUND TACTILE DISPLAY (AUTD)

In this study, we used an Airborne Ultrasound Tactile Display (AUTD) to present noncontact tactile stimuli. An AUTD comprises an array of ultrasound transducers [2], [3], [4]. An AUTD can focus ultrasound by controlling the phase of each transducer, and the focused ultrasound generates a nonnegative pressure called acoustic radiation pressure. The ultrasound focus can be narrowed down to the diffraction limit. Four AUTDs were used in the experiments. One AUTD unit was equipped with 249 ultrasound transducers operating at 40 kHz (TA4010A1, NIPPON CERAMIC Co., Ltd.) [32], [33]. Fig. 1 shows the AUTD. Each AUTD communicated via the EtherCAT protocol and was synchronously driven.
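The focusing principle described above can be sketched in a few lines. The snippet below is an illustrative example rather than the authors' implementation: it computes, for an assumed array geometry, the phase each transducer needs so that its wave arrives in phase at a chosen focal point (the function name, sound speed, and array layout are assumptions for the example).

import numpy as np

SPEED_OF_SOUND = 340.0              # m/s in air (approximate room-temperature value)
FREQUENCY = 40e3                    # 40 kHz transducers, as in the paper
WAVENUMBER = 2 * np.pi * FREQUENCY / SPEED_OF_SOUND

def focusing_phases(transducer_positions, focus):
    # transducer_positions: (N_trans, 3) array of transducer centers [m]
    # focus: (3,) focal point [m]
    # Each transducer is delayed so that all waves arrive in phase at the focus.
    distances = np.linalg.norm(transducer_positions - focus, axis=1)
    return np.mod(-WAVENUMBER * distances, 2 * np.pi)   # phase per transducer, in [0, 2*pi)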
IV. STIMULUS DESIGN

A. Overview

In this section, we propose and describe two stimulus methods: LM-single focus (LM-S) and LM-multi foci (LM-M). In the subject experiment, we compared and evaluated them to investigate whether they could render a static contact sensation with a convex surface. Fig. 2 shows a schematic of these stimulus methods.

Fig. 2. Schematic of the LM-S (single focus) stimulus and the LM-M (multi foci) stimulus. In the LM-S, a single focus is periodically moved in a circle on a finger pad. In the LM-M, multiple foci are simultaneously rotated. The foci are placed along the circular trajectory. (Figure labels: step width, focal position, radius, positions of multi foci.)

Fig. 3. Experimental equipment used in all subject experiments in this study (four AUTD units, a midair image display, and a depth camera; a 20 deg tilt is indicated in the figure). This equipment presents a midair image marker. An ultrasound tactile stimulus (LM stimulus) is presented when the finger of a participant touches this marker. The image marker was used to indicate the finger position to a participant.

In the LM-S, a single ultrasound focus is periodically moved in a circle on the finger pad.
The LM-S has been used in previous studies [19], [18], [30]; however, these studies have not evaluated whether this stimulus can produce static pressure and static contact sensations. In the LM-M stimulus, multiple ultrasound foci were simultaneously presented and periodically moved in a circle. The foci were placed along the circular focal trajectory so that they were in close proximity. The distance between the foci d was fixed at 3 mm in the experiments. In the experiments, the amplitude of each transducer was set to maximum, and the driving phase for presenting the LM-M stimulus was calculated using a linear synthesis scheme. Let φ_i ∈ R^{N_trans} be the phase for presenting each focus in the LM-M; then the phase for simultaneously presenting multiple foci, φ ∈ R^{N_trans}, is expressed as follows:

φ = Σ_{i=1}^{N_focus} φ_i,   (1)

where i ∈ {1, ..., N_focus} is the index of the multiple foci, N_focus is the total number of multiple foci, and N_trans is the total number of transducers.
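Read literally, eq. (1) adds the per-focus phase vectors element-wise while every transducer stays at full amplitude. A minimal sketch of that reading, reusing the focusing_phases helper from the earlier snippet, is shown below; whether and how the sum is wrapped back into one period is not specified in the text, so the modulo here is an assumption.

import numpy as np

def multi_focus_phases(transducer_positions, foci):
    # Linear synthesis of eq. (1): phi = sum over the N_focus foci of phi_i.
    phi = np.zeros(len(transducer_positions))
    for focus in foci:                      # i = 1 .. N_focus
        phi += focusing_phases(transducer_positions, focus)
    return np.mod(phi, 2 * np.pi)           # wrapping to one period is an assumption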
B. Formulation

First, we formulated the focus movement for the LM-S stimulus. The focus position in the LM-S, r_j ∈ R^3, is given by the following:

r_j = r_cnt + A(cos θ_j r_a + sin θ_j r_b) + z_j r_c,   (2)
θ_j = (2π/N)(j − 1),   (3)

where j ∈ {1, ..., N} is the index of the focus position, N is the total number of focus positions in one cycle of the LM, r_cnt ∈ R^3 is the center of the focal trajectory, and A is the radius of the trajectory. r_a, r_b, and r_c are unit vectors whose origin is at r_cnt and which are parallel to the x-, y-, and z-axis, respectively. The value of z_j was determined using the measured finger depth position. Based on these definitions, the step width of the focus movement is d_LM = 2πA/N. The index of the focus position j changes after the dwell time of the focus t_d. The dwell time is t_d = 1/(N f_LM), where f_LM is the frequency of the LM stimulus.

Second, we formulated the LM-M stimulus. Let r_{i,j} ∈ R^3 be the focus position on the LM trajectory of the i-th focus among the foci presented simultaneously. r_{i,j} is chosen from r_j, which is the position discretized with d_LM, such that the motion step width of the multi foci is fixed to d_LM. The conversion from r_j to r_{i,j} is expressed as follows:

r_{i,j} = r_{j+(i−1)l},   (4)
l = ⌊d / d_LM⌋,   (5)

where l is the index number calculated from the distance between the multi foci d. l is an integer, and the decimal part is rounded down.
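As a concrete illustration of eqs. (2) to (5), the sketch below generates one cycle of the circular trajectory and picks the simultaneous LM-M foci from it. It is only a reading of the formulas, not the authors' code: the zero depth offsets, the wrap-around of the focus index at the end of the cycle, and the function names are assumptions.

import numpy as np

def lm_trajectory(r_cnt, A, N, z=None):
    # Eqs. (2)-(3): N focus positions on a circle of radius A around r_cnt.
    j = np.arange(N)
    theta = 2 * np.pi * j / N                         # theta_j = 2*pi*(j-1)/N with 0-based j
    z = np.zeros(N) if z is None else np.asarray(z)   # z_j would come from the finger depth map
    offsets = np.stack([A * np.cos(theta), A * np.sin(theta), z], axis=1)
    return r_cnt + offsets                            # r_j, shape (N, 3)

def lm_m_foci(trajectory, j, n_focus, d, d_lm):
    # Eqs. (4)-(5): the i-th focus is r_{j + (i-1)*l} with l = floor(d / d_lm).
    l = int(np.floor(d / d_lm))
    idx = (j + np.arange(n_focus) * l) % len(trajectory)   # wrap-around is an assumption
    return trajectory[idx]

# Example: A = 5 mm circle sampled so that d_LM = 2*pi*A/N is about 0.23 mm (5 Hz case)
A = 5e-3
N = int(round(2 * np.pi * A / 0.23e-3))
traj = lm_trajectory(np.array([0.0, 0.03, 0.03]), A, N)
foci = lm_m_foci(traj, j=0, n_focus=4, d=3e-3, d_lm=2 * np.pi * A / N)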
V. EXPERIMENTAL EQUIPMENT

In this section, we describe the experimental equipment that presents a midair image with noncontact tactile feedback. This equipment was used in all the subject experiments conducted in this study.

A. System Overview

Fig. 3 shows the experimental equipment and its coordinate system. This system consists of the four AUTDs, a midair image display (ELF-SR1 Spatial Reality Display, SONY), and a depth camera (RealSense D435, Intel) used to measure the finger position. In the experiments, we used the midair image display to instruct participants where to put their fingers. The coordinate system is a right-handed system whose origin is the center of the surface of the image display. Throughout all the experiments, the system presented a 1 × 1 cm image marker at (0, 30, 30) mm. Ultrasound waves were output from the AUTDs when participants placed their fingertips on the marker. The presented ultrasound wave was reflected at the surface of the image display and then focused on the finger pad. The position of the reflected ultrasound focus r_ref ∈ R^3 can be calculated as the mirror image of the original focus position r_org ∈ R^3, which is expressed as follows:

r_ref = r_org + 2((r_p − r_org) · n) n,   (6)

where n is the normal vector of the display surface (the reflective surface) and r_p is an arbitrary point on the display surface.
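Eq. (6) is the standard mirror reflection of a point across a plane. A small self-contained sketch (illustrative only, not the authors' code) is shown below.

import numpy as np

def mirror_focus(r_org, r_p, n):
    # Eq. (6): reflect the original focus r_org across the plane through r_p with normal n.
    n = np.asarray(n, dtype=float)
    n = n / np.linalg.norm(n)                      # ensure a unit normal
    r_org = np.asarray(r_org, dtype=float)
    return r_org + 2 * np.dot(np.asarray(r_p, dtype=float) - r_org, n) * n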
Fig. 4. Algorithm for presenting the LM stimulus: (1) detecting the contact area and (2) calculating the LM trajectory. The size of the detection area is 1 × 1 × 2 cm. The part of the finger within this detection area is measured as the contact area, and the focal trajectory of the LM stimulus is calculated using this area. The center of the LM is the centroid of the contact area.

Fig. 5. Setup for measuring the radiation force. The tip of the force gauge, to which a 1.5 cm diameter acrylic disk was attached, was placed at the focal point. The force gauge was tilted 50 deg so that the disk opposed the propagation direction of the ultrasound wave.

B. Algorithm for Presenting LM Stimulus

In the system, there are three processes for presenting a circular LM stimulus to the finger pad of the participant. Fig. 4 illustrates the presentation process. First, the system detects the contact area between a participant's finger and the midair image marker using the depth camera. The size of the image marker is 1 × 1 × 0.5 cm. However, to measure the contact position stably, we used the area from the surface of the image marker to 2 cm behind it (1 × 1 × 2 cm) for contact detection. The part of the finger within the detection area was measured as the contact area. Second, the system calculated the focal trajectory for the circular LM stimulus using eq. 2 or eq. 4. The center position of the LM stimulus r_cnt was the centroid of the detected contact area.
The measured depth map of the fingertip surface was used for the z-position of the focal trajectory. Third, the focus is presented and moved along the calculated trajectory at a pre-specified frequency. In this algorithm, r_cnt is updated asynchronously from the focus position, at 90 fps. A Gaussian filter was applied to the calculated r_cnt over 10 frames to suppress the measurement error of the depth camera.
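The per-frame update just described can be summarized in a short sketch. The snippet below is illustrative only: the detection-box bounds, the smoothing parameters, and the use of scipy's gaussian_filter1d over the last 10 frames stand in for details the text does not specify.

import numpy as np
from collections import deque
from scipy.ndimage import gaussian_filter1d

HISTORY = deque(maxlen=10)                    # last 10 frames of r_cnt, as in the text

def contact_centroid(finger_points, box_min, box_max):
    # Keep only the measured finger points inside the 1 x 1 x 2 cm detection box
    # and return their centroid, which becomes r_cnt.
    inside = np.all((finger_points >= box_min) & (finger_points <= box_max), axis=1)
    if not inside.any():
        return None                            # no contact detected in this frame
    return finger_points[inside].mean(axis=0)

def smoothed_center(r_cnt):
    # Gaussian-smooth r_cnt over the recent frames (sigma chosen arbitrarily here).
    HISTORY.append(r_cnt)
    history = np.array(HISTORY)
    return gaussian_filter1d(history, sigma=2.0, axis=0)[-1]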
C. Measurement of Radiation Force

We measured the radiation force of the focus presented by the system, and it was 0.02 N. Fig. 5 shows the measurement setup. In this experiment, the tip of a force gauge, to which a 1.5 cm diameter acrylic disk was attached, was placed at the focal point. This force gauge (IMADA ZTS-2N) can measure forces up to 2 N with a resolution of 0.001 N. The force gauge was tilted by 50 deg so that the disk opposes the propagation direction of the ultrasound wave. The size of the acrylic disk was determined based on the preliminary simulation such that the disk size was larger than the focus size. Fig. 6 shows the simulated radiation force distribution of a single focus, and Fig. 7 shows the ultrasound transducer setup used for the simulation.

Fig. 6. Simulated radiation pressure distribution of the focus (normalized radiation pressure over x and y in mm). The white circle with a diameter of 1.5 cm indicates the area for measuring the radiation force.

Fig. 7. AUTD setup for the simulation (transducer and focus positions shown in x, y, z coordinates in mm).
The measurement range of the acrylic disk is superimposed on the simulated result as a white circle. In this simulation, the focus was generated at (0, 0, 250) mm, and the reflection of the sound waves was not considered. The focus position is shown in Fig. 7 as a cross mark.

VI. EXPERIMENT 1: STATIONARITY AND SURFACE CURVATURE

In this experiment, we evaluated the intensity of the vibratory and movement sensations in the LM stimulus and the perceived curvature of the object surface produced by the same LM stimulus (i.e., flat, convex, or concave).

A. Stimulus Condition

In this experiment, we presented the LM-M (LM-multi foci) and LM-S (LM-single focus) stimuli at 5 Hz (as described in Section IV). For comparison, an LM-S stimulus at 25 Hz was also presented. The radii of the LM stimuli A were 2, 3, 4, 5, and 6 mm. The motion step width d_LM of the LM stimulus at 5 Hz was as fine as 0.23 mm to elicit a static pressure sensation [13].
Fig. 8. Example of the presented pictures used to evaluate the perceived curvature (A = 2, 6 mm), showing flat, convex, and concave references. The radius of the object was changed according to the radius of the presented LM stimulus A. For one stimulus condition, the images of flat, convex, and concave were sequentially presented in random order. Participants reported the perceptual similarity between the perceived curvature and the image.

Moreover, the step width d_LM at 25 Hz was 4 mm to avoid exceeding the AUTD update limit (1 kHz) [33]. For the 5 Hz LM-M stimuli, the number of simultaneously presented foci N_focus was four, and their placement interval d was 3 mm. All stimuli were presented in random order. Each participant underwent two sets of experiments. Therefore, 30 experimental trials were conducted (i.e., 3 LM stimulus types × 5 stimulus radii × 2 sets = 30 trials).

B. Procedure

Eight males (aged 24–31) and two females (aged 24 and 28) participated in this experiment. The experimental equipment was the visuo-tactile display described above (Fig. 3 and Section V). Participants were instructed to place their index fingertips on the presented midair image marker. The tactile stimulus was always presented while the fingertip was touching the marker. First, to evaluate the tactile sensation of the presented stimulus, the participants answered the following two questions on a seven-point Likert scale:
Q1. How intensely did you perceive a vibratory sensation in the presented stimulus?
Q2. How intensely did you perceive the movement of the stimulus position?

Participants were instructed to answer 1 if they perceived no vibration or movement. In Q2, we evaluated whether the participants noticed the circular focus movement of the LM. Second, the participants evaluated the curvature rendered by the LM stimulus on their finger pads. In this experiment, we provided three typical shapes as references (i.e., flat, convex, and concave). Three images corresponding to the three shapes (Fig. 8) were presented to the participants as reference images. To evaluate the perceived curvature, the participants responded to Q3 with a seven-point Likert scale.

Q3. Does the stimulus shape perceived at your finger pad match the situation illustrated in the reference images?

For one stimulus condition, the flat, convex, and concave reference images (Fig. 8) were presented successively in random order. Participants independently reported the perceptual similarity to each reference image (i.e., flat, convex, and concave).
We varied the radius of the illustrated object in the reference images to match that of the LM stimulus A. Participants were instructed to ignore differences in the perceived size between the image and the tactile stimulus and to evaluate only the similarity of the perceived curvature (i.e., flat, convex, or concave). The overall size of the finger sketch drawn in the reference images was adjusted so that its nail size matched the average Japanese adult nail length (13.6 mm) [34].

C. Results and Analysis

1) Stationarity: Box-and-whisker plots of the evaluated vibratory sensations (answers to Q1) are shown in Fig. 9a. The evaluated movement sensations (answers to Q2) are shown in Fig. 9b. If a data value v satisfies either of the following conditions, the data point is treated as an outlier:

v ≤ v_25 − 1.5 × IQR  or  v ≥ v_75 + 1.5 × IQR,   (7)

where v_25 and v_75 are the 25th-percentile and 75th-percentile values, respectively, and IQR is the interquartile range. Outliers were plotted as white dots in the graphs.
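For reference, the outlier rule of eq. (7) can be written directly (an illustrative sketch, not the authors' analysis script):

import numpy as np

def iqr_outlier_mask(values):
    # Eq. (7): flag v <= v25 - 1.5*IQR or v >= v75 + 1.5*IQR as outliers.
    v = np.asarray(values, dtype=float)
    v25, v75 = np.percentile(v, [25, 75])
    iqr = v75 - v25
    return (v <= v25 - 1.5 * iqr) | (v >= v75 + 1.5 * iqr)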
As seven participants could not perceive the LM-M stimulus with A = 2 mm, their answers were excluded. In total, 13 data of the LM-M with A = 2 mm were excluded from each graph. The results showed that the highest median value of the vibratory sensation score was 7, and the stimulus condition was the LM-S at 25 Hz with A = 4, 5, 6 mm. The lowest median value was 1, and the condition was the LM-M at 5 Hz with A = 2 mm. The highest median value of the movement sensation score was 6.5, and the stimulus condition was the LM-S at 5 Hz with A = 5 mm. The lowest median value was 1, and the condition was the LM-M at 5 Hz with A = 2 mm. We conducted the Wilcoxon signed-rank test with Bonferroni correction to compare the results between the stimulus conditions (LM-M, LM-S at 5 Hz, and LM-S at 25 Hz) for each stimulus radius A. The results of the LM-M stimulus with A = 2 mm were excluded from the analysis. The test results showed that at all values of A, the perceived vibratory sensation of the LM-S at 25 Hz was significantly higher than that of the other LM stimuli (p < 0.005). At A = 3 mm, the vibratory sensation of the LM-S at 5 Hz was significantly higher than that of the LM-M (p < 0.05). The results also showed that at A = 3, 4, 5, 6 mm, the perceived movement sensation of the LM-S at 25 Hz was significantly lower than that of the other LM stimuli (p < 0.05). At A = 3, 4, 5 mm, the movement sensation of the LM-M was significantly lower than that of the LM-S at 5 Hz (p < 0.05). Fig. 9 shows these pairs with significant differences as "*" and "**" for p < 0.05 and p < 0.005, respectively.
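The pairwise comparisons described above can be reproduced in outline with scipy (a sketch under assumptions; this is not the authors' analysis code, and the data layout is hypothetical):

import numpy as np
from scipy.stats import wilcoxon

def pairwise_wilcoxon_bonferroni(scores_by_condition):
    # Paired Wilcoxon signed-rank tests between conditions, Bonferroni-corrected.
    # scores_by_condition: dict mapping a condition name to a 1-D array of paired scores.
    names = list(scores_by_condition)
    pairs = [(a, b) for i, a in enumerate(names) for b in names[i + 1:]]
    results = {}
    for a, b in pairs:
        _, p = wilcoxon(scores_by_condition[a], scores_by_condition[b])
        results[(a, b)] = min(1.0, p * len(pairs))    # Bonferroni correction
    return results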
Fig. 9. Evaluated perceptual stationarity of the LM stimulus on a finger pad in Experiment 1: (a) evaluated vibration and (b) evaluated movement, for the multi-focus stimulus at 5 Hz and the single-focus stimulus at 5 Hz and 25 Hz, over radii of 2 to 6 mm. Participants evaluated the perceived intensity of the vibratory sensation and the focal movement sensation of the LM stimulus on a seven-point Likert scale.

Fig. 10. Evaluated perceived curvature in Experiment 1: answered similarity for the flat, convex, and concave references, for (a) the LM-M at 5 Hz, (b) the LM-S at 5 Hz, and (c) the LM-S at 25 Hz. The reference images with flat, convex, and concave shapes were presented, and the participants answered the perceptual similarity between the perceived tactile shape (curvature) and the image on a seven-point Likert scale.
Moreover, we conducted the Friedman test with Bonferroni correction using the stimulus radius A and the stimulus type (LM-M, LM-S at 5 Hz, and LM-S at 25 Hz) as factors. The test results showed that A and the stimulus type had a significant effect on both the vibration and movement sensations (p < 0.0005).

2) Surface Curvature: Box-and-whisker plots of the evaluated tactile shape (answers to Q3) with the LM-M, LM-S at 5 Hz, and LM-S at 25 Hz are shown in Fig. 10a, Fig. 10b, and Fig. 10c, respectively. 13 data of the LM-M with A = 2 mm were excluded (Section VI-B). The highest median value for the flat score was 5.5, and the condition was the LM-S at 25 Hz with A = 4, 5 mm. The lowest median was 3.5, and the condition was the LM-S at 5 Hz with A = 6 mm. The highest median for the convex score was 5, and the conditions were the LM-M with A = 3, 4 mm and the LM-S at 5 Hz with A = 2, 3, 4 mm. The lowest median was 2, and the condition was the LM-S at 25 Hz with A = 5 mm. The highest median value of the concave score was 5, and the condition was the LM-S at 5 Hz with A = 6 mm. The lowest median was 1.5, and the condition was the LM-M with A = 3 mm.
We conducted the Wilcoxon signed-rank test with Bonferroni correction to compare the scores between the shapes (i.e., flat, convex, and concave) at each stimulus condition. The test results showed that in the LM-M with A = 3, 4 mm, the flat and convex scores were significantly higher than the concave scores (p < 0.05). With A = 4 mm, the convex score was significantly higher than the flat score (p < 0.005). In the LM-S at 5 Hz with A = 2, 3 mm, the flat score was significantly higher than the concave score (p < 0.05). For A = 2, 3, 4 mm, the convex score was significantly higher than the concave score (p < 0.05). With A = 2 mm, the convex score was also significantly higher than the flat score (p < 0.05). In the LM-S at 25 Hz, all flat scores were significantly higher than the convex scores (p < 0.005). For A = 2, 3, 4, 5 mm, the flat score was significantly higher than the concave score (p < 0.05).
VII. EXPERIMENT 2: PERCEIVED SIZE

In this experiment, we changed the radius of the LM stimulus A and evaluated the perceived stimulus size.

Fig. 11. Presented pictures used to evaluate the perceived size of the presented LM stimulus. Five pictures with different radii (2, 3, 4, 5, and 6 mm), labeled (1) to (5), were presented simultaneously. Participants selected the image showing the circle whose size matched the perceived haptic size.

A. Procedure

Eight males (aged 24–31) and two females (aged 24 and 28) participated in this experiment. The experimental setup was the same as that used in Experiment 1 (Fig. 3). The tactile stimulus was always presented while the fingertip was touching the marker. The stimulus conditions were identical to those used in Experiment 1, which are explained in Section VI-A. 30 experimental trials were conducted (i.e., 3 LM stimulus types × 5 stimulus radii × 2 sets = 30 trials). A real-time video of the participants' fingers was presented to them during the experiment. A screenshot of the presented video is shown in Fig. 11.
In this video, a blue circular image corresponding to the trajectory of the LM stimulus is superimposed on the finger pad of the participant. Participants selected one of the videos showing a circle whose size matched the perceived haptic size to evaluate the perceived size of the presented stimulus. The center of the circular image was changed in real-time to match the center of the presented LM stimulus r_cnt. The radii of the circular images were 2, 3, 4, 5, and 6 mm, which were the same as the radii of LM stimuli A used in this experiment. Five videos with different radii were simultaneously presented to the participant. This video was captured using an RGB camera built into the depth camera.
B. Results and Analysis
Fig. 12 presents the confusion matrix for the stimulus size identification results. The highest accuracy was 0.6, and the stimulus condition was LM-S at 5 Hz with A = 5 mm. The lowest accuracy was 0.15, and the condition was LM-S at 25 Hz with A = 2, 4 mm. The chance rate in this experiment was 0.2, and accuracy exceeded the chance rate in all conditions, except for the lowest-accuracy condition.
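Each quoted accuracy is the diagonal entry of the row-normalized confusion matrix for one presented radius, compared against the chance rate of 1/5 = 0.2; a minimal sketch with hypothetical values (not the measured matrix of Fig. 12) is:

import numpy as np

radii = [2, 3, 4, 5, 6]                  # presented radii in mm
chance = 1.0 / len(radii)                # five response alternatives -> 0.2
conf = np.array([                        # hypothetical row-normalized confusion matrix
    [0.45, 0.30, 0.15, 0.05, 0.05],      # rows: presented radius, columns: answered radius
    [0.20, 0.40, 0.25, 0.10, 0.05],
    [0.10, 0.25, 0.35, 0.20, 0.10],
    [0.05, 0.10, 0.25, 0.40, 0.20],
    [0.05, 0.05, 0.15, 0.30, 0.45],
])

for r, acc in zip(radii, np.diag(conf)):  # diagonal entries = correct identifications
    print(f"A = {r} mm: accuracy {acc:.2f}",
          "above chance" if acc > chance else "at or below chance")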
We compared the perceived size across the stimulus conditions (LM-M and LM-S at 5 Hz and 25 Hz, respectively). Fig. 13 shows box-and-whisker plots of the perceived stimulus sizes. The highest perceived stimulus radius was 5 mm, and the conditions were LM-S at 5 and 25 Hz with A = 5 mm and all LM stimuli with A = 6 mm. The lowest radius was 2 mm, and the conditions were LM-M with A = 2, 3 mm. We applied the Wilcoxon signed-rank test with Bonferroni correction to the results of the perceived size. The test results showed that the perceived radii of the LM-S at 25 Hz were significantly higher than those of the LM-M with A = 3, 4, 5 mm (p < 0.05) and the LM-S at 5 Hz with A = 2, 4, 5 mm (p < 0.05). The results also showed that the radius of the LM-S at 5 Hz was significantly larger than that of the LM-M with A = 3, 4 mm (p < 0.05).
VIII. EXPERIMENT 3: EQUIVALENT PHYSICAL STIMULUS
This experiment investigated the physically static force that is equivalent to the pressure sensation evoked by the LM stimulus at a finger pad. The physical force was presented by pushing a force gauge against the finger pad.
A. Setup and Stimulus
Fig. 14 illustrates the experimental setup. In this experiment, we used a force gauge whose z-position was automatically controlled by a 3-axis stage (QT-AMM3 and ALS-7013-G1MR, CHUO PRECISION INDUSTRIAL Co., Ltd.)
and the visual-haptic system (Fig. 3) used in the other experiments. This force gauge (IMADA ZTS-50N) can measure forces up to 50 N with a resolution of 0.01 N. The stimulus conditions were the same as those used in Experiment 1, which is explained in Section VI-A. There were 30 experimental trials conducted (3 different LM stimuli × 5 stimulus radii A × 2 sets = 30 experimental trials).
B. Procedure
Eight males (23–28 years old) and two females (24 and 28 years old) participated in this experiment. Participants were instructed to place the index fingers of their right hands on the marker presented by the midair image display. Participants were also instructed to place the index fingers of their left hands such that the finger pad faced the tip of the force gauge. At this point, the force gauge did not touch the finger pad. The force gauge was fixed in midair in a horizontal orientation (Fig. 14). Participants grasped the aluminum handle and fixed their finger position by placing it in front of an acrylic auxiliary plate. A plastic cylinder with a radius of 1 cm was attached to the tip of the force gauge. The basal plane of the cylinder was beveled to 1 mm so that the participants did not perceive its edges.
Participants wore headphones and listened to white noise during the experiment to avoid hearing the driving noise of the AUTD. A force gauge was pressed against the finger pad of the participant by moving along the z-axis. After the force gauge reached the specified position (the initial pushing depth was 4 mm), an LM stimulus was presented to the finger pad of the right hand. After 2 s, the LM stimulus was stopped, and the force gauge returned to its initial position. The force gauge immediately started pushing again, and the LM stimulus was presented again. This 2 s tactile stimulation was repeated automatically. In this experimental loop, participants compared the physical pushing force with the LM stimulus and orally reported the results. Based on the participants' answers, we changed the pushing depth of the force gauge such that the perceived intensity of the two stimuli at the finger was the same.
Fig. 12. Confusion matrix of the stimulus size identification (presented radius [mm] vs. answered radius [mm]; accuracy rate shown for Multi at 5 Hz, Single at 5 Hz, and Single at 25 Hz).
The chance rate in this experiment was 0.2.
Fig. 13. Evaluation result of the perceived size of the circular LM stimulus (presented radius [mm] vs. perceived radius [mm] for Multi at 5 Hz, Single at 5 Hz, and Single at 25 Hz).
Fig. 14. Setup to evaluate the perceived force. A force gauge was pressed against the finger pad of the left hand, and the LM stimulus was presented to the right finger. The pushing depth was automatically controlled by the 3-axis stage. These stimuli were terminated after 2 s and automatically repeated. Participants compared the pushing force with the LM stimulus and orally reported the comparison results.
Fig. 15. Plastic cylinder attached to the tip of the force gauge, used to push a finger pad. The radius was 1 cm, and the basal plane of the cylinder was beveled 1 mm.
For example, the pushing depth in the 2nd stimulus was shortened to weaken the pushing force if the participant answered that the pushing force was stronger than the LM stimulus in the 1st stimulus.
The force gauge kept pushing the finger pad and recorded the pushing force for 2 s when the participants reported that the intensities of the two stimuli were the same. The median value of the pushing force time series was finally adopted as the measured force. After the measurement, the stimulus conditions were changed, and the same procedure was repeated. The adjustment resolution of the pushing depth was 0.25 mm, and the speed of the force gauge was 5 mm/s. The maximum number of pushing depth adjustments was 20, and all participants completed the experiment within 30 min. In the stimulus comparison, we instructed the participants to ignore the perception at the moment when the LM stimulus and the pushing force were presented, in order to assess the steady-state perceived intensity of the LM stimulus.
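This adjustment procedure can be summarized as a matching loop: change the pushing depth in 0.25 mm steps according to the participant's report, stop after at most 20 adjustments, and take the median of a 2 s force recording once the two stimuli feel equal. The following sketch is only an illustration of that loop; present_trial and read_force are hypothetical stand-ins for the stage and force-gauge interfaces.

import statistics

STEP_MM = 0.25            # adjustment resolution of the pushing depth
MAX_ADJUSTMENTS = 20      # upper limit on the number of adjustments

def measure_equivalent_force(present_trial, read_force, depth_mm=4.0):
    """present_trial(depth) returns the oral report 'stronger', 'weaker', or 'equal';
    read_force() returns the 2 s force recording in newtons as a list of samples."""
    for _ in range(MAX_ADJUSTMENTS):
        answer = present_trial(depth_mm)
        if answer == "equal":
            samples = read_force()               # keep pushing and record the force for 2 s
            return statistics.median(samples)    # median of the time series = measured force
        # if the pushing force felt stronger than the LM stimulus, reduce the depth, and vice versa
        depth_mm += -STEP_MM if answer == "stronger" else STEP_MM
    return None                                  # no match within the allowed adjustments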
C. Results and Analysis
In this experiment, the median value of the measured force time series was adopted as the participant's answer. The maximum, median, and minimum standard deviations (SD) of the time series data were 0.522, 0.186, and 0, respectively; the maximum, median, and minimum values came from different participants. Fig. 16 shows the time series data whose SD is the maximum (0.522) and whose SD is the median (0.186). The median force of each time series is shown in Fig. 16 as a red line. Fig. 17 shows the box-and-whisker plots of the pushing forces. Outliers were calculated using eq. 7 and are plotted as white dots.
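Eq. 7 is not reproduced here; as a stand-in, the following sketch flags outliers with the conventional box-plot rule (points beyond 1.5 times the interquartile range from the quartiles), using hypothetical force values.

import numpy as np

def split_outliers(forces):
    """Return (inliers, outliers) for one condition's perceived-force values [N]."""
    forces = np.asarray(forces, dtype=float)
    q1, q3 = np.percentile(forces, [25, 75])
    iqr = q3 - q1
    lo, hi = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    mask = (forces < lo) | (forces > hi)
    return forces[~mask], forces[mask]

# Hypothetical perceived forces for one condition, including one extreme value:
inliers, outliers = split_outliers([0.16, 0.20, 0.24, 0.22, 0.18, 1.22, 0.25, 0.21, 0.19, 0.23])
print(outliers)   # -> [1.22]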
Fig. 16. Time series data of the measured force. We calculated the standard deviation (SD) of each recorded time series, and the data with the maximum SD (0.522) and with the median SD (0.186) are plotted in this figure. We also plotted the median value of these plotted time series forces.
Fig. 17. Physically static pushing force perceptually equal to the intensity of the LM stimulus (radius [mm] vs. perceived force [N] for Multi at 5 Hz, Single at 5 Hz, and Single at 25 Hz).
One participant was unable to perceive the LM at 5 Hz with A = 2 mm; thus, this value was plotted as 0 N. Forces lower than 0.01 N, which is the lowest measurable force of the force gauge, were also plotted as 0 N. The results showed that the highest median value of the perceived force was 0.53 N, and the stimulus condition was LM-S at 25 Hz with A = 4 mm.
The lowest median value was 0.16 N, and the condition was LM-M at 5 Hz with A = 2 mm. We also conducted the Wilcoxon signed-rank test with Bonferroni correction to compare the perceived force between the stimulus conditions (LM-M, LM-S at 5 Hz, and LM-S at 25 Hz) at each stimulus radius A. The test results showed that with A = 2, 3, 4, 6 mm, the perceived force of the LM-S stimulus at 25 Hz was significantly higher than that of the LM-M stimulus (p < 0.05). For A = 4, 6 mm, the perceived force of the LM-S stimulus at 25 Hz was significantly higher than that of the LM-S stimulus at 5 Hz (p < 0.05). For A = 2, 3 mm, the perceived force of the LM-S stimulus at 5 Hz was significantly higher than that of the LM-M stimulus (p < 0.05).
IX. DISCUSSION
A. Static Pressure Sensation at Finger Pad
The results of Experiment 1 showed that LM at 5 Hz (including both LM-M and LM-S) can produce a non-vibratory pressure sensation on a finger pad. Moreover, with stimulus radii of A = 2, 3 mm, the movement sensations were barely perceivable, and the pressure sensation was essentially static. The vibration sensation of the LM stimulus at 5 Hz was 4 or less in all conditions except LM-M with A = 5 mm, which was significantly lower than that of the LM-S at 25 Hz. For A = 2, 3 mm, the movement sensations of the LM-M were 2 or less.
The results of Experiment 3 also showed that the perceived intensity of the pressure sensation on the finger pad was perceptually comparable to a physical contact force of 0.16 N or more on average. With the lowest vibration and movement sensation (LM-M with A = 3 mm), the perceived force was 0.24 N, which was 12 times the radiation pressure at the focus presented in the setup. However, in Experiment 3, extremely low and high forces were reported, causing large variance. For the LM-M with A = 3 mm, the minimum and maximum values were 0 and 1.22 N, respectively. Note that the participant who answered 0 N could perceive the LM-M stimulus with A = 3 mm. Since the answered equivalent force was less than 0.01 N, which is the measurable minimum force of the force gauge, the force was recorded as 0 N. This large difference in perceived force could be attributed to individual differences in the tactile-receptor adaptation speed to the pushing stimulus presented by the force gauge. The pushing force is static, and the perceived intensity of such a stimulus gradually weakens with stimulus duration owing to SA-I (slowly adapting type I) tactile receptor adaptation [35]. In Experiment 3, the contact time with the force gauge was controlled to be 2 s to prevent this adaptation effect, and participants were instructed to ignore the perception at the moment of contact. However, if the adaptation speed greatly differs among participants, even under this control, there could be a large difference in the answered equivalent pushing force.
For example, we considered that the adaptation speed of the participants who answered an extremely high force was fast. When the adaptation speed is fast, the perceived intensity of the contact force rapidly weakens over the 2 s period, resulting in a high pushing force as the equivalent force. Conversely, the adaptation speed of the participants answering an extremely low force could be slow. The evaluation of the individual differences in adaptation speed is important for future work. The experimental results also indicated that the perceived intensity of the LM-M stimulus with A = 2 mm was extremely weak. In Experiments 1 and 2, eight participants could not perceive the LM-M stimulus with A = 2 mm. We considered that the weakness is because the circumference with a 2 mm radius and the length of the curved line-shaped stimulus distribution used in LM-M (9 mm) were almost the same. As an exception, in Experiment 3, only one participant could not perceive the LM-M stimulus with A = 2 mm, and the average perceived force was 0.16 N. This difference could be attributed to the difference in the presentation time of the LM stimulus [35]. In Experiment 3, the stimulus duration was 2 s, but in Experiments 1 and 2, the participants continued to be presented with the LM stimulus without any time limit. Therefore, in most participants in Experiments 1 and 2,
Radiation pressure distributions of the presented stimuli (x [mm] vs. y [mm], normalized radiation pressure [-]) for LM-M and LM-S with A = 2, 3, 4, 5, and 6 mm.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 Radiaton pressure [-] Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Simulated time-averaged radiation pressure distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' These values were normalized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 x [mm] 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 y [mm] LM-M A = 3 mm 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='8 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 Power spectrum of 5 Hz [-] 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 x [mm] 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='0 10.' 
Fig. 19. Simulated 5 Hz-power spectrum distribution of the time variation of radiation pressure produced by LM-M and LM-S at 5 Hz. The power spectrum distribution was obtained by simulating the time variation of the radiation pressure at each point in the stimulus area and Fourier transforming the time variation. These values were normalized. (Figure: panels for LM-M and LM-S with A = 3 mm; axes x [mm] and y [mm]; color scale: power spectrum of 5 Hz [-].)

their SA-I tactile receptors completely adapted to the LM-M stimulus, and they could not perceive the stimulus.

B. Perceived Curvature

In Experiments 1 and 2, since eight participants could not perceive the LM-M stimulus with a radius of 2 mm, we excluded it from the following discussions. The results of Experiments 1 and 2 suggest that a circular LM stimulus with A = 2–4 mm can render a contact sensation with a convex surface with radii of 2–4 mm. As described in Section IX-A, particularly for A = 2 and 3 mm, the contact sensation was largely static.
In the LM at 5 Hz with A = 2–4 mm, the convex score was significantly higher than the concave score (p < 0.05). In LM-M with A = 4 mm and LM-S with A = 2 and 4 mm, the convex score was significantly higher than the flat score (p < 0.05). The perceived radii for LM-M with A = 3 mm and LM-S with A = 4 mm were 2 and 4 mm, respectively. The participants' comments also suggest that a convex sensation was rendered: four participants commented that they sometimes felt in contact with sharp or rounded objects. Based on the authors' own subjective impression, the LM-M and LM-S stimuli at 5 Hz with A = 3 mm felt like a contact sensation with a rounded convex surface. However, in some cases, participants found it difficult to determine whether the perceived contact shape was convex or flat; two of the participants commented that this determination was difficult. Moreover, no significant differences were observed between the convex scores for the LM-S at 5 Hz with A = 3 mm and LM-M with A = 3 mm. In the future, we will quantitatively evaluate the curvature of the perceived surface and explore a control method for the curvature.
In the LM at 5 Hz with A = 2–4 mm, all concave scores were less than 2, and a concave sensation was not perceived. We considered that the periphery of the LM was hardly perceived in this radius range, as three participants commented that they gave high concave scores when they strongly perceived the perimeter of the stimulus.
The characteristics of the time-averaged radiation pressure distribution of the LM stimulus were also consistent with this consideration. Fig. 18 shows the simulated time-averaged pressure distribution; the simulation setup is the same as that shown in Fig. 7. The results indicate that the periphery of the LM stimulus becomes the peak of the time-averaged radiation pressure only above a radius of 5 mm, where the concave score is high.
Finally, we compared the perceived curvature with the 5 Hz vibration intensity distribution produced by the LM stimulus at 5 Hz. Fig. 19 shows the simulated distribution of the 5 Hz vibration intensity (power spectrum of 5 Hz) produced by LM-M and LM-S at 5 Hz with A = 3 mm. The power spectrum distribution was obtained by simulating the time variation of the radiation pressure at each point in the stimulus area and Fourier transforming the time variation; the simulation setup is again the same as that shown in Fig. 7. The simulation results showed that the physical intensity of the 5 Hz vibration was highest on the focal orbits and did not match the perceived stimulus shape (perceived curvature). With A = 3 mm, the LM-M and LM-S stimuli at 5 Hz were perceived as contact with a convex surface; however, even under these conditions, the peaks of the vibration intensity formed a circle, which corresponds to a concave contact shape. In the future, we will investigate the relationship between perceived curvature and the vibration intensity distribution by measuring or simulating the skin displacement generated by the LM stimulus, as in previous studies [36], [37].
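Both maps are per-point reductions of the simulated radiation-pressure time series p(x, y, t): Fig. 18 plots its time average, and Fig. 19 plots the power of its 5 Hz Fourier component. The following sketch illustrates only this post-processing; it does not reproduce the paper's acoustic simulation (the Fig. 7 setup). The Gaussian focal-spot model, the angular arrangement of the foci, and every parameter value (sampling rate, grid, spot width, number of foci) are illustrative assumptions, not values from the paper.

# Illustrative sketch only: approximates the post-processing behind Figs. 18 and 19
# under the assumption that each ultrasound focus produces a Gaussian radiation-
# pressure spot rotating on a circle of radius A. All parameters are hypothetical.
import numpy as np

fs = 1000          # sampling rate of the simulated time variation [Hz]
T = 1.0            # simulated duration [s]
f_lm = 5.0         # LM repetition frequency [Hz]
A = 3e-3           # radius of the circular focal trajectory [m]
n_foci = 4         # LM-M uses four simultaneous foci (a single focus would mimic LM-S)
sigma = 2e-3       # assumed width of each Gaussian focal spot [m]
dtheta = 2.0 * np.pi / n_foci   # angular spacing of the foci along the orbit (placeholder)

t = np.arange(0, T, 1.0 / fs)
x = np.linspace(-10e-3, 10e-3, 81)
y = np.linspace(-10e-3, 10e-3, 81)
X, Y = np.meshgrid(x, y)

# Radiation pressure p(t, x, y): sum of Gaussian spots centred on the rotating foci.
p = np.zeros((t.size,) + X.shape)
for k in range(n_foci):
    fx = A * np.cos(2.0 * np.pi * f_lm * t + k * dtheta)   # focus x-position over time
    fy = A * np.sin(2.0 * np.pi * f_lm * t + k * dtheta)   # focus y-position over time
    d2 = (X[None, :, :] - fx[:, None, None]) ** 2 + (Y[None, :, :] - fy[:, None, None]) ** 2
    p += np.exp(-d2 / (2.0 * sigma ** 2))

# Fig. 18-style map: time-averaged radiation pressure at each point, normalized.
p_mean = p.mean(axis=0)
p_mean /= p_mean.max()

# Fig. 19-style map: Fourier-transform the time variation at each point, take the
# power of the 5 Hz component, then normalize.
spectrum = np.fft.rfft(p, axis=0)
freqs = np.fft.rfftfreq(t.size, d=1.0 / fs)
power_5hz = np.abs(spectrum[np.argmin(np.abs(freqs - f_lm))]) ** 2
power_5hz /= power_5hz.max()

print(p_mean.shape, power_5hz.shape)   # two normalized maps over the stimulus area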
C. Comparison of LM-M and LM-S at 5 Hz

The results of Experiment 1 showed that the curved line-shaped pressure distribution used in LM-M, which consists of four ultrasound foci, can suppress the movement sensation of low-frequency LM stimuli. With A = 3, 4, and 5 mm, the movement sensation of the LM-M was significantly lower than that of the LM-S at 5 Hz. We considered that the reason for this suppression of motion perception was that the simultaneously stimulated area of LM-M was wider than that of LM-S. The LM-M stimulus was also perceived to be smaller than the LM-S stimulus at 5 Hz: for A = 3 and 4 mm, the perceived size of LM-M was significantly smaller than that of LM-S at 5 Hz (p < 0.05). This trend in perceived size is consistent with the difference in the size of the time-averaged radiation pressure distributions; the simulation results shown in Fig. 18 indicate that the time-averaged distributions of LM-M with A = 3 and 4 mm were smaller than those of LM-S.
In terms of vibratory sensation and perceived shape (curvature), there were no large differences between LM-M and LM-S at 5 Hz. Except for A = 3 mm, there were no significant differences in vibration sensation, and for A = 3, 4, and 5 mm, the convex scores were higher than the flat and concave scores for both LM-M and LM-S at 5 Hz.
D. Comparison of Movement Sense Between LM Frequencies

The results of Experiment 1 showed that the movement sensation of the LM at 25 Hz was lower than that of the LM at 5 Hz. At A = 3, 4, 5, and 6 mm, the movement sensation of the LM-S at 25 Hz was significantly lower than that of LM-M and LM-S at 5 Hz. We considered that this was because the focus speed at f_LM = 25 Hz was too fast for the participants to perceive a movement distinct from the vibration. This result is consistent with a previous study [38], which presented circular STM stimuli with a diameter of 4–7 cm on the palm and found that the focal movement could not be perceived when the movement speed of the focus was above 18 Hz; our 25 Hz condition is above this threshold, whereas 5 Hz is well below it.
The results also showed that rendering a convex surface was difficult with the vibratory sensation produced by focused ultrasound. As the vibration score of LM-S at 25 Hz was 6 or higher, this stimulus evoked a vibratory sensation in the experiments. In the LM at 25 Hz, the flat score was the highest for all radii and was significantly greater than the convex score. One participant commented that the contact shape often felt flat when vibration was perceived.
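For a rough sense of the focal speeds compared here, an illustrative calculation only (it assumes each focus traverses the circular trajectory of radius A once per LM period, which is how the stimulus trajectory is described in this paper; the speeds below are not values reported by the authors):

v = 2\pi A f_{\mathrm{LM}} \approx
\begin{cases}
0.09\ \mathrm{m/s}, & A = 3\ \mathrm{mm},\ f_{\mathrm{LM}} = 5\ \mathrm{Hz},\\
0.47\ \mathrm{m/s}, & A = 3\ \mathrm{mm},\ f_{\mathrm{LM}} = 25\ \mathrm{Hz}.
\end{cases}

Under this assumption, the 25 Hz condition moves each focus about five times faster along the same orbit than the 5 Hz condition.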
X. CONCLUSION

In this study, we verified that an ultrasound radiation pressure distribution that varies spatiotemporally at 5 Hz can provide a static pressure sensation on a finger pad. We also demonstrated that the pressure sensation on the finger pad was perceived as a static contact sensation with a convex surface. In the experiment, four ultrasound focal points were presented on the finger pad of each participant and simultaneously rotated in a circle at 5 Hz. When the radius of the focal trajectory was 3 mm, the perceived vibration and movement sensations were the lowest, 1.5 and 2 out of 7 on average, respectively. The perceived intensity of this evoked pressure sensation was equivalent to a physically constant force of 0.24 N lasting for 2 s, which is 12 times the physically presented radiation force at the focus. Under the most static condition, the pressure sensation was perceived as a contact sensation with a convex surface with a radius of 2 mm; the average perceptual similarity was 5 out of 7. From these results, we conclude that focused ultrasound can render a static contact sensation with a small convex surface at a finger pad. This contact-sensation rendering enables the noncontact tactile reproduction of a static, finely uneven surface. In the future, we will investigate curvature control of the rendered convex surface.

REFERENCES

[1] I. Rakkolainen, E. Freeman, A. Sand, R. Raisamo, and S. Brewster, "A survey of mid-air ultrasound haptics and its applications," IEEE Transactions on Haptics, 2020.
[2] T. Iwamoto and H. Shinoda, "Ultrasound tactile display for stress field reproduction-examination of non-vibratory tactile apparent movement," in First Joint Eurohaptics Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems. World Haptics Conference. IEEE, 2005, pp. 220–228.
[3] T. Hoshi, M. Takahashi, T. Iwamoto, and H. Shinoda, "Noncontact tactile display based on radiation pressure of airborne ultrasound," IEEE Transactions on Haptics, vol. 3, no. 3, pp. 155–165, 2010.
[4] T. Carter, S. A. Seah, B. Long, B. Drinkwater, and S. Subramanian, "Ultrahaptics: multi-point mid-air haptic feedback for touch surfaces," in Proceedings of the 26th annual ACM symposium on User interface software and technology. ACM, 2013, pp. 505–514.
[5] K. Yosioka and Y. Kawasima, "Acoustic radiation pressure on a compressible sphere," Acta Acustica united with Acustica, vol. 5, no. 3, pp. 167–173, 1955.
[6] S. Suzuki, M. Fujiwara, Y. Makino, and H. Shinoda, "Midair hand guidance by an ultrasound virtual handrail," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 271–276.
[7] A. Yoshimoto, K. Hasegawa, Y. Makino, and H. Shinoda, "Midair haptic pursuit," IEEE Transactions on Haptics, vol. 12, no. 4, pp. 652–657, 2019.
[8] E. Freeman, D.-B. Vo, and S. Brewster, "Haptiglow: Helping users position their hands for better mid-air gestures and ultrasound haptic feedback," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 289–294.
[9] Y. Monnai, K. Hasegawa, M. Fujiwara, K. Yoshino, S. Inoue, and H. Shinoda, "Haptomime: mid-air haptic interaction with a floating virtual screen," in Proceedings of the 27th annual ACM symposium on User interface software and technology, 2014, pp. 663–667.
[10] T. Romanus, S. Frish, M. Maksymenko, W. Frier, L. Corenthy, and O. Georgiou, "Mid-air haptic bio-holograms in mixed reality," in 2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct). IEEE, 2019, pp. 348–352.
[11] T. Morisaki, M. Fujiwara, Y. Makino, and H. Shinoda, "Midair haptic-optic display with multi-tactile texture based on presenting vibration and pressure sensation by ultrasound," in SIGGRAPH Asia 2021 Emerging Technologies, 2021, pp. 1–2.
[12] Y. Makino, Y. Furuyama, S. Inoue, and H. Shinoda, "Haptoclone (haptic-optical clone) for mutual tele-environment by real-time 3d image transfer with midair force feedback," in CHI, 2016, pp. 1980–1990.
[13] T. Morisaki, M. Fujiwara, Y. Makino, and H. Shinoda, "Non-vibratory pressure sensation produced by ultrasound focus moving laterally and repetitively with fine spatial step width," IEEE Transactions on Haptics, vol. 15, no. 2, pp. 441–450, 2021.
[14] R. S. Johansson and Å. B. Vallbo, "Tactile sensory coding in the glabrous skin of the human hand," Trends in Neurosciences, vol. 6, pp. 27–32, 1983.
[15] S. J. Bolanowski Jr, G. A. Gescheider, R. T. Verrillo, and C. M. Checkosky, "Four channels mediate the mechanical aspects of touch," The Journal of the Acoustical Society of America, vol. 84, no. 5, pp. 1680–1694, 1988.
[16] K. Hasegawa and H. Shinoda, "Aerial vibrotactile display based on multiunit ultrasound phased array," IEEE Transactions on Haptics, vol. 11, no. 3, pp. 367–377, 2018.
[17] R. Takahashi, K. Hasegawa, and H. Shinoda, "Lateral modulation of midair ultrasound focus for intensified vibrotactile stimuli," in International Conference on Human Haptic Sensing and Touch Enabled Computer Applications. Springer, 2018, pp. 276–288.
[18] ——, "Tactile stimulation by repetitive lateral movement of midair ultrasound focus," IEEE Transactions on Haptics, vol. 13, no. 2, pp. 334–342, 2019.
[19] W. Frier, D. Ablart, J. Chilles, B. Long, M. Giordano, M. Obrist, and S. Subramanian, "Using spatiotemporal modulation to draw tactile patterns in mid-air," in International Conference on Human Haptic Sensing and Touch Enabled Computer Applications. Springer, 2018, pp. 270–281.
[20] T. Howard, G. Gallagher, A. Lécuyer, C. Pacchierotti, and M. Marchal, "Investigating the recognition of local shapes using mid-air ultrasound haptics," in 2019 IEEE World Haptics Conference (WHC). IEEE, 2019, pp. 503–508.
[21] Z. Somei, T. Morisaki, Y. Toide, M. Fujiwara, Y. Makino, and H. Shinoda, "Spatial resolution of mesoscopic shapes presented by airborne ultrasound," in International Conference on Human Haptic Sensing and Touch Enabled Computer Applications. Springer, 2022, pp. 243–251.
[22] W. Frier, D. Pittera, D. Ablart, M. Obrist, and S. Subramanian, "Sampling strategy for ultrasonic mid-air haptics," in Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, 2019, pp. 1–11.
[23] G. Korres and M. Eid, "Haptogram: Ultrasonic point-cloud tactile stimulation," IEEE Access, vol. 4, pp. 7758–7769, 2016.
[24] D. Hajas, D. Pittera, A. Nasce, O. Georgiou, and M. Obrist, "Mid-air haptic rendering of 2d geometric shapes with a dynamic tactile pointer," IEEE Transactions on Haptics, vol. 13, no. 4, pp. 806–817, 2020.
[25] P. Marti, O. Parlangeli, A. Recupero, S. Guidi, and M. Sirizzotti, "Mid-air haptics for shape recognition of virtual objects," Ergonomics, pp. 1–19, 2021.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Marti, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Parlangeli, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Recupero, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Guidi, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Sirizzotti, “Mid- air haptics for shape recognition of virtual objects,” Ergonomics, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 1–19, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [26] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Mulot, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Gicquel, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Zanini, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Frier, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Marchal, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Pacchierotti, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Howard, “Dolphin: A framework for the design and perceptual evaluation of ultrasound mid-air haptic stimuli,” in ACM Symposium on Applied Perception 2021, 2021, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 1–10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [27] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Mulot, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Gicquel, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Frier, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Marchal, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Pacchierotti, and T.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Howard, “Curvature discrimination for dynamic ultrasound mid-air haptic stimuli,” in 2021 IEEE World Haptics Conference (WHC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' IEEE, 2021, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 1145–1145.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [28] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Inoue, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Makino, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Shinoda, “Active touch perception produced by airborne ultrasonic haptic hologram,” in 2015 IEEE World Haptics Conference (WHC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' IEEE, 2015, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 362–367.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [29] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Long, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Seah, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Carter, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Subramanian, “Rendering vol- umetric haptic shapes in mid-air using ultrasound,” ACM Transactions on Graphics (TOG), vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 33, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 6, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 1–10, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [30] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Matsubayashi, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Makino, and H.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Shinoda, “Direct finger manipula- tion of 3d object image with ultrasound haptic feedback,” in Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 1–11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [31] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Matsubayashi, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Oikawa, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Mizutani, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Makino, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Shinoda, “Display of haptic shape using ultrasound pressure distribution forming cross-sectional shape,” in 2019 IEEE World Haptics Conference (WHC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' IEEE, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 419–424.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [32] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Inoue, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Makino, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Shinoda, “Scalable architecture for airborne ultrasound tactile display,” in International AsiaHaptics conference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Springer, 2016, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 99–103.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [33] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Suzuki, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Inoue, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Fujiwara, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Makino, and H.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Shinoda, “Autd3: Scalable airborne ultrasound tactile display,” IEEE Transactions on Haptics, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [34] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' of Advanced Industrial Science and Technology, “Icam: Identifi- cation code of anthropometric measurements,” 2011, https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='airc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' aist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='go.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='jp/dhrt/hand/data/list.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content='html.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [35] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Vallbo, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Johansson et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=', “Properties of cutaneous mechanore- ceptors in the human hand related to touch sensation,” Hum neurobiol, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 3, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 1, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' 3–14, 1984.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' [36] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Chilles, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Frier, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ztFJT4oBgHgl3EQfjCwq/content/2301.11572v1.pdf'} +page_content=' Abdouni, M.' 
Tao Morisaki has been a Ph.D. student with the Graduate School of Frontier Sciences, the University of Tokyo, since 2020. He received the M.S. degree from the Department of Complexity Science and Engineering, the University of Tokyo, Chiba, Japan, in 2020. His research interests include haptics, ultrasound mid-air haptics, and human-computer interaction. He is a member of VRSJ.

Masahiro Fujiwara is a project assistant professor in the Graduate School of Frontier Sciences, the University of Tokyo, Japan. He received the BS degree in Engineering and the MS and PhD degrees in Information Science and Technology from the University of Tokyo, in 2010, 2012, and 2015, respectively. His research interests include information physics, haptics, non-contact sensing, and related application systems. He is a member of IEEE.

Yasutoshi Makino is an associate professor in the Department of Complexity Science and Engineering at the University of Tokyo. He received his PhD in Information Science and Technology from the University of Tokyo in 2007. He worked as a researcher for two years at the University of Tokyo and as an assistant professor at Keio University from 2009 to 2013. In 2013 he moved to the University of Tokyo as a lecturer, and he has been an associate professor since 2017. His research interests include haptic interactive systems.

Hiroyuki Shinoda is a Professor at the Graduate School of Frontier Sciences, the University of Tokyo. After receiving a Ph.D. in engineering from the University of Tokyo, he was an Associate Professor at Tokyo University of Agriculture and Technology from 1995 to 1999. He was a Visiting Scholar at UC Berkeley in 1999 and was an Associate Professor at the University of Tokyo from 2000 to 2012. His research interests include information physics, haptics, mid-air haptics, two-dimensional communication, and their application systems. He is a member of SICE, IEEJ, RSJ, JSME, VRSJ, IEEE, and ACM.